#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for x86_64.
#
# March 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#
#		IALU/gcc-4.8(*)	AVX(**)		AVX2
# P4		4.90/+120%	-
# Core 2	2.39/+90%	-
# Westmere	1.86/+120%	-
# Sandy Bridge	1.39/+140%	1.10
# Haswell	1.10/+175%	1.11		0.65
# Skylake	1.12/+120%	0.96		0.51
# Silvermont	2.83/+95%	-
# VIA Nano	1.82/+150%	-
# Sledgehammer	1.38/+160%	-
# Bulldozer	2.21/+130%	0.97
#
# (*)	improvement coefficients relative to clang are more modest and
#	are ~50% on most processors; in both cases we are comparing to
#	__int128 code;
# (**)	SSE2 implementation was attempted, but among non-AVX processors
#	it was faster than integer-only code only on older Intel P4 and
#	Core processors, by 30-50% with the gain shrinking on newer
#	parts, and slower on contemporary ones, for example almost 2x
#	slower on Atom; as the former are naturally disappearing, SSE2
#	is deemed unnecessary;

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=12);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
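
# $avx>=1 enables the AVX code paths (poly1305_blocks_avx, poly1305_emit_avx)
# and $avx>=2 additionally the AVX2 one (poly1305_blocks_avx2); the version
# thresholds above gate this on the assembler being new enough to accept the
# corresponding instructions.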

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");

sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
$code.=<<___;
	mulq	$h0			# h0*r1
	mov	%rax,$d2
	mov	$r0,%rax
	mov	%rdx,$d3

	mulq	$h0			# h0*r0
	mov	%rax,$h0		# future $h0
	mov	$r0,%rax
	mov	%rdx,$d1

	mulq	$h1			# h1*r0
	add	%rax,$d2
	mov	$s1,%rax
	adc	%rdx,$d3

	mulq	$h1			# h1*s1
	mov	$h2,$h1			# borrow $h1
	add	%rax,$h0
	adc	%rdx,$d1

	imulq	$s1,$h1			# h2*s1
	add	$h1,$d2
	mov	$d1,$h1
	adc	\$0,$d3

	imulq	$r0,$h2			# h2*r0
	add	$d2,$h1
	mov	\$-4,%rax		# mask value
	adc	$h2,$d3

	and	$d3,%rax		# last reduction step
	mov	$d3,$h2
	shr	\$2,$d3
	and	\$3,$h2
	add	$d3,%rax
	add	%rax,$h0
	adc	\$0,$h1
___
}
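
# For reference, a plain (and slow) model of what one iteration computes;
# this sub is an illustration added for clarity, is not used by the
# generator, and only assumes the Math::BigInt core module:
use Math::BigInt;

sub poly1305_iteration_model {
	my ($h,$r) = @_;		# Math::BigInt values
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);	# 2^130-5
	return $h->copy()->bmul($r)->bmod($p);	# h = h*r mod 2^130-5
}

# The "last reduction step" above folds bits 130 and up of the product
# back in multiplied by 5, because 2^130 is congruent to 5 modulo
# 2^130-5; the result is only partially reduced, which is fine, as full
# reduction is deferred to poly1305_emit.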

########################################################################
# The layout of the opaque area is as follows:
#
#	unsigned __int64 h[3];		# current hash value base 2^64
#	unsigned __int64 r[2];		# key value base 2^64
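#
# h[] lives at offsets 0, 8 and 16, and r[] at offsets 24 and 32; see
# poly1305_init below.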

$code.=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	poly1305_init
.globl	poly1305_blocks
.globl	poly1305_emit
.type	poly1305_init,\@function,3
.align	32
poly1305_init:
	xor	%rax,%rax
	mov	%rax,0($ctx)		# initialize hash value
	mov	%rax,8($ctx)
	mov	%rax,16($ctx)

	cmp	\$0,$inp
	je	.Lno_key

	lea	poly1305_blocks(%rip),%r10
	lea	poly1305_emit(%rip),%r11
___
$code.=<<___ if ($avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%r9
	lea	poly1305_blocks_avx(%rip),%rax
	lea	poly1305_emit_avx(%rip),%rcx
	bt	\$`60-32`,%r9		# AVX?
	cmovc	%rax,%r10
	cmovc	%rcx,%r11
___
$code.=<<___ if ($avx>1);
	lea	poly1305_blocks_avx2(%rip),%rax
	bt	\$`5+32`,%r9		# AVX2?
	cmovc	%rax,%r10
___
$code.=<<___;
	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
	and	0($inp),%rax
	and	8($inp),%rcx
	mov	%rax,24($ctx)
	mov	%rcx,32($ctx)

	mov	%r10,0(%rdx)
	mov	%r11,8(%rdx)

	mov	\$1,%eax
.Lno_key:
	ret
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,\@function,4
.align	32
poly1305_blocks:
.Lblocks:
	sub	\$16,$len		# too short?
	jc	.Lno_data

	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lblocks_body:

	mov	$len,%r15		# reassign $len

	mov	24($ctx),$r0		# load r
	mov	32($ctx),$s1

	mov	0($ctx),$h0		# load hash value
	mov	8($ctx),$h1
	mov	16($ctx),$h2

	mov	$s1,$r1
	shr	\$2,$s1
	mov	$r1,%rax
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
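	# Why s1 works: the h1*r1 term carries weight 2^128, and
	# 2^128 = 5/4 mod 2^130-5; the clamping in poly1305_init clears
	# the two least significant bits of r1, so 5*r1/4 is exact and
	# equals r1 + (r1>>2).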
	jmp	.Loop

.align	32
.Loop:
	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2
___
	&poly1305_iteration();
$code.=<<___;
	mov	$r1,%rax
	sub	\$16,%r15		# len-=16
	jnc	.Loop

	mov	$h0,0($ctx)		# store hash value
	mov	$h1,8($ctx)
	mov	$h2,16($ctx)

	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rsp
.Lno_data:
.Lblocks_epilogue:
	ret
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,\@function,3
.align	32
poly1305_emit:
.Lemit:
	mov	0($ctx),%r8		# load hash value
	mov	8($ctx),%r9
	mov	16($ctx),%r10

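	# The tag is (h mod p) + nonce mod 2^128. Compute h+5: if the sum
	# carries into bit 130, then h >= p = 2^130-5 and the low 128
	# bits of h+5 are h mod p; otherwise h already is reduced.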
	mov	%r8,%rax
	add	\$5,%r8			# compare to modulus
	mov	%r9,%rcx
	adc	\$0,%r9
	adc	\$0,%r10
	shr	\$2,%r10		# did 130-bit value overflow?
	cmovnz	%r8,%rax
	cmovnz	%r9,%rcx

	add	0($nonce),%rax		# accumulate nonce
	adc	8($nonce),%rcx
	mov	%rax,0($mac)		# write result
	mov	%rcx,8($mac)

	ret
.size	poly1305_emit,.-poly1305_emit
___
if ($avx) {

########################################################################
# The layout of the opaque area is as follows:
#
#	unsigned __int32 h[5];		# current hash value base 2^26
#	unsigned __int32 is_base2_26;
#	unsigned __int64 r[2];		# key value base 2^64
#	unsigned __int64 pad;
#	struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are base 2^26 digits of powers of the multiplier key. There
# are 5 digits, but the last four are interleaved with their multiples
# of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.

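# Again purely for illustration (not used by the generator), the base
# 2^26 digit split the AVX code relies on, as a plain Perl model on a
# Math::BigInt value:

sub base2_26_model {
	my $v = shift->copy();		# Math::BigInt, < 2^130
	my @d;
	for (1..5) {
		push @d, $v->copy()->band(0x3ffffff)->numify();
		$v->brsft(26);
	}
	return @d;	# d[0] + d[1]*2^26 + ... + d[4]*2^104 == input
}
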
my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

$code.=<<___;
.type	__poly1305_block,\@abi-omnipotent
.align	32
__poly1305_block:
___
	&poly1305_iteration();
$code.=<<___;
	ret
.size	__poly1305_block,.-__poly1305_block

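################################################################
# __poly1305_init_avx computes r^2, r^3 and r^4 by repeated calls to
# __poly1305_block, and stores their base 2^26 digits, interleaved with
# multiples of 5, in the r^2:r^1:r^4:r^3 column order described above.
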
.type	__poly1305_init_avx,\@abi-omnipotent
.align	32
__poly1305_init_avx:
	mov	$r0,$h0
	mov	$r1,$h1
	xor	$h2,$h2

	lea	48+64($ctx),$ctx	# size optimization

	mov	$r1,%rax
	call	__poly1305_block	# r^2

	mov	\$0x3ffffff,%eax	# save interleaved r^2 and r base 2^26
	mov	\$0x3ffffff,%edx
	mov	$h0,$d1
	and	$h0#d,%eax
	mov	$r0,$d2
	and	$r0#d,%edx
	mov	%eax,`16*0+0-64`($ctx)
	shr	\$26,$d1
	mov	%edx,`16*0+4-64`($ctx)
	shr	\$26,$d2

	mov	\$0x3ffffff,%eax
	mov	\$0x3ffffff,%edx
	and	$d1#d,%eax
	and	$d2#d,%edx
	mov	%eax,`16*1+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*1+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*2+0-64`($ctx)
	shr	\$26,$d1
	mov	%edx,`16*2+4-64`($ctx)
	shr	\$26,$d2

	mov	$h1,%rax
	mov	$r1,%rdx
	shl	\$12,%rax
	shl	\$12,%rdx
	or	$d1,%rax
	or	$d2,%rdx
	and	\$0x3ffffff,%eax
	and	\$0x3ffffff,%edx
	mov	%eax,`16*3+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*3+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*4+0-64`($ctx)
	mov	$h1,$d1
	mov	%edx,`16*4+4-64`($ctx)
	mov	$r1,$d2

	mov	\$0x3ffffff,%eax
	mov	\$0x3ffffff,%edx
	shr	\$14,$d1
	shr	\$14,$d2
	and	$d1#d,%eax
	and	$d2#d,%edx
	mov	%eax,`16*5+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*5+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*6+0-64`($ctx)
	shr	\$26,$d1
	mov	%edx,`16*6+4-64`($ctx)
	shr	\$26,$d2

	mov	$h2,%rax
	shl	\$24,%rax
	or	%rax,$d1
	mov	$d1#d,`16*7+0-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d2#d,`16*7+4-64`($ctx)
	lea	($d2,$d2,4),$d2		# *5
	mov	$d1#d,`16*8+0-64`($ctx)
	mov	$d2#d,`16*8+4-64`($ctx)

	mov	$r1,%rax
	call	__poly1305_block	# r^3

	mov	\$0x3ffffff,%eax	# save r^3 base 2^26
	mov	$h0,$d1
	and	$h0#d,%eax
	shr	\$26,$d1
	mov	%eax,`16*0+12-64`($ctx)

	mov	\$0x3ffffff,%edx
	and	$d1#d,%edx
	mov	%edx,`16*1+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*2+12-64`($ctx)

	mov	$h1,%rax
	shl	\$12,%rax
	or	$d1,%rax
	and	\$0x3ffffff,%eax
	mov	%eax,`16*3+12-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	$h1,$d1
	mov	%eax,`16*4+12-64`($ctx)

	mov	\$0x3ffffff,%edx
	shr	\$14,$d1
	and	$d1#d,%edx
	mov	%edx,`16*5+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*6+12-64`($ctx)

	mov	$h2,%rax
	shl	\$24,%rax
	or	%rax,$d1
	mov	$d1#d,`16*7+12-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+12-64`($ctx)

	mov	$r1,%rax
	call	__poly1305_block	# r^4

	mov	\$0x3ffffff,%eax	# save r^4 base 2^26
	mov	$h0,$d1
	and	$h0#d,%eax
	shr	\$26,$d1
	mov	%eax,`16*0+8-64`($ctx)

	mov	\$0x3ffffff,%edx
	and	$d1#d,%edx
	mov	%edx,`16*1+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*2+8-64`($ctx)

	mov	$h1,%rax
	shl	\$12,%rax
	or	$d1,%rax
	and	\$0x3ffffff,%eax
	mov	%eax,`16*3+8-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	$h1,$d1
	mov	%eax,`16*4+8-64`($ctx)

	mov	\$0x3ffffff,%edx
	shr	\$14,$d1
	and	$d1#d,%edx
	mov	%edx,`16*5+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	shr	\$26,$d1
	mov	%edx,`16*6+8-64`($ctx)

	mov	$h2,%rax
	shl	\$24,%rax
	or	%rax,$d1
	mov	$d1#d,`16*7+8-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+8-64`($ctx)

	lea	-48-64($ctx),$ctx	# size [de-]optimization
	ret
.size	__poly1305_init_avx,.-__poly1305_init_avx

.type	poly1305_blocks_avx,\@function,4
.align	32
poly1305_blocks_avx:
	mov	20($ctx),%r8d		# is_base2_26
	cmp	\$128,$len
	jae	.Lblocks_avx
	test	%r8d,%r8d
	jz	.Lblocks

.Lblocks_avx:
	and	\$-16,$len
	jz	.Lno_data_avx

	vzeroupper

	test	%r8d,%r8d
	jz	.Lbase2_64_avx

	test	\$31,$len
	jz	.Leven_avx

	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lblocks_avx_body:

	mov	$len,%r15		# reassign $len

	mov	0($ctx),$d1		# load hash value
	mov	8($ctx),$d2
	mov	16($ctx),$h2#d

	mov	24($ctx),$r0		# load r
	mov	32($ctx),$s1

	################################# base 2^26 -> base 2^64
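	# i.e. repack the five 26-bit digits d[0..4] into 64-bit limbs:
	#	h0 = d[0] + d[1]*2^26 + d[2]*2^52	(mod 2^64)
	#	h1 = d[2]>>12 + d[3]*2^14 + d[4]*2^40	(mod 2^64, with carry)
	#	h2 = d[4]>>24				(plus carry)
	# the \$-1<<31 masks separate neighbouring digits without
	# assuming they are fully carried.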
	mov	$d1#d,$h0#d
	and	\$-1<<31,$d1
	mov	$d2,$r1			# borrow $r1
	mov	$d2#d,$h1#d
	and	\$-1<<31,$d2

	shr	\$6,$d1
	shl	\$52,$r1
	add	$d1,$h0
	shr	\$12,$h1
	shr	\$18,$d2
	add	$r1,$h0
	adc	$d2,$h1

	mov	$h2,$d1
	shl	\$40,$d1
	shr	\$24,$h2
	add	$d1,$h1
	adc	\$0,$h2			# can be partially reduced...

	mov	\$-4,$d2		# ... so reduce
	mov	$h2,$d1
	and	$h2,$d2
	shr	\$2,$d1
	and	\$3,$h2
	add	$d2,$d1			# =*5
	add	$d1,$h0
	adc	\$0,$h1

	mov	$s1,$r1
	mov	$s1,%rax
	shr	\$2,$s1
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2

	call	__poly1305_block

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	mov	$h0,%rax
	mov	$h0,%rdx
	shr	\$52,$h0
	mov	$h1,$r0
	mov	$h1,$r1
	shr	\$26,%rdx
	and	\$0x3ffffff,%rax	# h[0]
	shl	\$12,$r0
	and	\$0x3ffffff,%rdx	# h[1]
	shr	\$14,$h1
	or	$r0,$h0
	shl	\$24,$h2
	and	\$0x3ffffff,$h0		# h[2]
	shr	\$40,$r1
	and	\$0x3ffffff,$h1		# h[3]
	or	$r1,$h2			# h[4]

	sub	\$16,%r15
	jz	.Lstore_base2_26_avx

	vmovd	%rax#d,$H0
	vmovd	%rdx#d,$H1
	vmovd	$h0#d,$H2
	vmovd	$h1#d,$H3
	vmovd	$h2#d,$H4
	jmp	.Lproceed_avx

.align	32
.Lstore_base2_64_avx:
	mov	$h0,0($ctx)
	mov	$h1,8($ctx)
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
	jmp	.Ldone_avx

.align	16
.Lstore_base2_26_avx:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
	mov	%rdx#d,4($ctx)
	mov	$h0#d,8($ctx)
	mov	$h1#d,12($ctx)
	mov	$h2#d,16($ctx)
.align	16
.Ldone_avx:
	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rsp
.Lno_data_avx:
.Lblocks_avx_epilogue:
	ret

.align	32
.Lbase2_64_avx:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lbase2_64_avx_body:

	mov	$len,%r15		# reassign $len

	mov	24($ctx),$r0		# load r
	mov	32($ctx),$s1

	mov	0($ctx),$h0		# load hash value
	mov	8($ctx),$h1
	mov	16($ctx),$h2#d

	mov	$s1,$r1
	mov	$s1,%rax
	shr	\$2,$s1
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

	test	\$31,$len
	jz	.Linit_avx

	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2
	sub	\$16,%r15

	call	__poly1305_block

.Linit_avx:
	################################# base 2^64 -> base 2^26
	mov	$h0,%rax
	mov	$h0,%rdx
	shr	\$52,$h0
	mov	$h1,$d1
	mov	$h1,$d2
	shr	\$26,%rdx
	and	\$0x3ffffff,%rax	# h[0]
	shl	\$12,$d1
	and	\$0x3ffffff,%rdx	# h[1]
	shr	\$14,$h1
	or	$d1,$h0
	shl	\$24,$h2
	and	\$0x3ffffff,$h0		# h[2]
	shr	\$40,$d2
	and	\$0x3ffffff,$h1		# h[3]
	or	$d2,$h2			# h[4]

	vmovd	%rax#d,$H0
	vmovd	%rdx#d,$H1
	vmovd	$h0#d,$H2
	vmovd	$h1#d,$H3
	vmovd	$h2#d,$H4
	movl	\$1,20($ctx)		# set is_base2_26

	call	__poly1305_init_avx

.Lproceed_avx:
	mov	%r15,$len

	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rax
	lea	48(%rsp),%rsp
.Lbase2_64_avx_epilogue:
	jmp	.Ldo_avx

.align	32
.Leven_avx:
	vmovd	4*0($ctx),$H0		# load hash value
	vmovd	4*1($ctx),$H1
	vmovd	4*2($ctx),$H2
	vmovd	4*3($ctx),$H3
	vmovd	4*4($ctx),$H4

.Ldo_avx:
___
$code.=<<___ if (!$win64);
	lea	-0x58(%rsp),%r11
	sub	\$0x178,%rsp
___
$code.=<<___ if ($win64);
	lea	-0xf8(%rsp),%r11
	sub	\$0x218,%rsp
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
.Ldo_avx_body:
___
$code.=<<___;
	sub	\$64,$len
	lea	-32($inp),%rax
	cmovc	%rax,$inp

	vmovdqu	`16*3`($ctx),$D4	# preload r0^2
	lea	`16*3+64`($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	################################################################
	# load input
	vmovdqu	16*2($inp),$T0
	vmovdqu	16*3($inp),$T1
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	vpsrldq	\$6,$T0,$T2		# splat input
	vpsrldq	\$6,$T1,$T3
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	vpsrlq	\$40,$T4,$T4		# 4
	vpsrlq	\$26,$T0,$T1
	vpand	$MASK,$T0,$T0		# 0
	vpsrlq	\$4,$T3,$T2
	vpand	$MASK,$T1,$T1		# 1
	vpsrlq	\$30,$T3,$T3
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always
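	# at this point each $T register holds one 26-bit limb of two
	# consecutive 16-byte blocks, one block per 64-bit lane: $T0
	# limb 0, $T1 limb 1, ..., $T4 limb 4 with the padbit on top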

	jbe	.Lskip_loop_avx

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*1-64`($ctx),$D1
	vmovdqu	`16*2-64`($ctx),$D2
	vpshufd	\$0xEE,$D4,$D3		# 34xx -> 3434
	vpshufd	\$0x44,$D4,$D0		# xx12 -> 1212
	vmovdqa	$D3,-0x90(%r11)
	vmovdqa	$D0,0x00(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vmovdqu	`16*3-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x80(%r11)
	vmovdqa	$D1,0x10(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqu	`16*4-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x70(%r11)
	vmovdqa	$D2,0x20(%rsp)
	vpshufd	\$0xEE,$D0,$D4
	vmovdqu	`16*5-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D4,-0x60(%r11)
	vmovdqa	$D0,0x30(%rsp)
	vpshufd	\$0xEE,$D1,$D3
	vmovdqu	`16*6-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D3,-0x50(%r11)
	vmovdqa	$D1,0x40(%rsp)
	vpshufd	\$0xEE,$D2,$D4
	vmovdqu	`16*7-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D4,-0x40(%r11)
	vmovdqa	$D2,0x50(%rsp)
	vpshufd	\$0xEE,$D0,$D3
	vmovdqu	`16*8-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D3,-0x30(%r11)
	vmovdqa	$D0,0x60(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x20(%r11)
	vmovdqa	$D1,0x70(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x10(%r11)
	vmovdqa	$D2,0x80(%rsp)

	jmp	.Loop_avx

.align	32
.Loop_avx:
	################################################################
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	#   \___________________/
	# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	#   \___________________/ \____________________/
	#
	# Note that we start with inp[2:3]*r^2. This is because it
	# doesn't depend on the reduction in the previous iteration.
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# though note that $Tx and $Hx are "reversed" in this section,
	# and $D4 is preloaded with r0^2...

	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vmovdqa		$H2,0x20(%r11)	# offload hash
	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vmovdqa		0x10(%rsp),$H2	# r1^2
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vmovdqa		$H0,0x00(%r11)
	vpmuludq	0x20(%rsp),$T4,$H0	# h4*s1
	vmovdqa		$H1,0x10(%r11)
	vpmuludq	$T3,$H2,$H1	# h3*r1
	vpaddq		$H0,$D0,$D0	# d0 += h4*s1
	vpaddq		$H1,$D4,$D4	# d4 += h3*r1
	vmovdqa		$H3,0x30(%r11)
	vpmuludq	$T2,$H2,$H0	# h2*r1
	vpmuludq	$T1,$H2,$H1	# h1*r1
	vpaddq		$H0,$D3,$D3	# d3 += h2*r1
	vmovdqa		0x30(%rsp),$H3	# r2^2
	vpaddq		$H1,$D2,$D2	# d2 += h1*r1
	vmovdqa		$H4,0x40(%r11)
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpmuludq	$T2,$H3,$H0	# h2*r2
	vpaddq		$H2,$D1,$D1	# d1 += h0*r1

	vmovdqa		0x40(%rsp),$H4	# s2^2
	vpaddq		$H0,$D4,$D4	# d4 += h2*r2
	vpmuludq	$T1,$H3,$H1	# h1*r2
	vpmuludq	$T0,$H3,$H3	# h0*r2
	vpaddq		$H1,$D3,$D3	# d3 += h1*r2
	vmovdqa		0x50(%rsp),$H2	# r3^2
	vpaddq		$H3,$D2,$D2	# d2 += h0*r2
	vpmuludq	$T4,$H4,$H0	# h4*s2
	vpmuludq	$T3,$H4,$H4	# h3*s2
	vpaddq		$H0,$D1,$D1	# d1 += h4*s2
	vmovdqa		0x60(%rsp),$H3	# s3^2
	vpaddq		$H4,$D0,$D0	# d0 += h3*s2

	vmovdqa		0x80(%rsp),$H4	# s4^2
	vpmuludq	$T1,$H2,$H1	# h1*r3
	vpmuludq	$T0,$H2,$H2	# h0*r3
	vpaddq		$H1,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$T4,$H3,$H0	# h4*s3
	vpmuludq	$T3,$H3,$H1	# h3*s3
	vpaddq		$H0,$D2,$D2	# d2 += h4*s3
	vmovdqu		16*0($inp),$H0	# load input
	vpaddq		$H1,$D1,$D1	# d1 += h3*s3
	vpmuludq	$T2,$H3,$H3	# h2*s3
	vpmuludq	$T2,$H4,$T2	# h2*s4
	vpaddq		$H3,$D0,$D0	# d0 += h2*s3

	vmovdqu		16*1($inp),$H1
	vpaddq		$T2,$D1,$D1	# d1 += h2*s4
	vpmuludq	$T3,$H4,$T3	# h3*s4
	vpmuludq	$T4,$H4,$T4	# h4*s4
	vpsrldq		\$6,$H0,$H2	# splat input
	vpaddq		$T3,$D2,$D2	# d2 += h3*s4
	vpaddq		$T4,$D3,$D3	# d3 += h4*s4
	vpsrldq		\$6,$H1,$H3
	vpmuludq	0x70(%rsp),$T0,$T4	# h0*r4
	vpmuludq	$T1,$H4,$T0	# h1*s4
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpaddq		$T4,$D4,$D4	# d4 += h0*r4
	vmovdqa		-0x90(%r11),$T4	# r0^4
	vpaddq		$T0,$D0,$D0	# d0 += h1*s4

	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	#vpsrlq	\$40,$H4,$H4		# 4
	vpsrldq	\$`40/8`,$H4,$H4	# 4
	vpsrlq	\$26,$H0,$H1
	vpand	$MASK,$H0,$H0		# 0
	vpsrlq	\$4,$H3,$H2
	vpand	$MASK,$H1,$H1		# 1
	vpand	0(%rcx),$H4,$H4		# .Lmask24
	vpsrlq	\$30,$H3,$H3
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpaddq	0x00(%r11),$H0,$H0	# add hash value
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4

	lea	16*2($inp),%rax
	lea	16*4($inp),$inp
	sub	\$64,$len
	cmovc	%rax,$inp

	################################################################
	# Now we accumulate (inp[0:1]+hash)*r^4
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq		$T0,$D0,$D0
	vpaddq		$T1,$D1,$D1
	vmovdqa		-0x80(%r11),$T2	# r1^4
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq		$T0,$D2,$D2
	vpaddq		$T1,$D3,$D3
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpmuludq	-0x70(%r11),$H4,$T0	# h4*s1
	vpaddq		$T4,$D4,$D4

	vpaddq		$T0,$D0,$D0	# d0 += h4*s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq		$T1,$D3,$D3	# d3 += h2*r1
	vmovdqa		-0x60(%r11),$T3	# r2^4
	vpaddq		$T0,$D4,$D4	# d4 += h3*r1
	vpmuludq	$H1,$T2,$T1	# h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq		$T1,$D2,$D2	# d2 += h1*r1
	vpaddq		$T2,$D1,$D1	# d1 += h0*r1

	vmovdqa		-0x50(%r11),$T4	# s2^4
	vpmuludq	$H2,$T3,$T0	# h2*r2
	vpmuludq	$H1,$T3,$T1	# h1*r2
	vpaddq		$T0,$D4,$D4	# d4 += h2*r2
	vpaddq		$T1,$D3,$D3	# d3 += h1*r2
	vmovdqa		-0x40(%r11),$T2	# r3^4
	vpmuludq	$H0,$T3,$T3	# h0*r2
	vpmuludq	$H4,$T4,$T0	# h4*s2
	vpaddq		$T3,$D2,$D2	# d2 += h0*r2
	vpaddq		$T0,$D1,$D1	# d1 += h4*s2
	vmovdqa		-0x30(%r11),$T3	# s3^4
	vpmuludq	$H3,$T4,$T4	# h3*s2
	vpmuludq	$H1,$T2,$T1	# h1*r3
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2

	vmovdqa		-0x10(%r11),$T4	# s4^4
	vpaddq		$T1,$D4,$D4	# d4 += h1*r3
	vpmuludq	$H0,$T2,$T2	# h0*r3
	vpmuludq	$H4,$T3,$T0	# h4*s3
	vpaddq		$T2,$D3,$D3	# d3 += h0*r3
	vpaddq		$T0,$D2,$D2	# d2 += h4*s3
	vmovdqu		16*2($inp),$T0	# load input
	vpmuludq	$H3,$T3,$T2	# h3*s3
	vpmuludq	$H2,$T3,$T3	# h2*s3
	vpaddq		$T2,$D1,$D1	# d1 += h3*s3
	vmovdqu		16*3($inp),$T1
	vpaddq		$T3,$D0,$D0	# d0 += h2*s3

	vpmuludq	$H2,$T4,$H2	# h2*s4
	vpmuludq	$H3,$T4,$H3	# h3*s4
	vpsrldq		\$6,$T0,$T2	# splat input
	vpaddq		$H2,$D1,$D1	# d1 += h2*s4
	vpmuludq	$H4,$T4,$H4	# h4*s4
	vpsrldq		\$6,$T1,$T3
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpmuludq	-0x20(%r11),$H0,$H4	# h0*r4
	vpmuludq	$H1,$T4,$H0	# h1*s4
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	#vpsrlq	\$40,$T4,$T4		# 4
	vpsrldq	\$`40/8`,$T4,$T4	# 4
	vpsrlq	\$26,$T0,$T1
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpand	$MASK,$T0,$T0		# 0
	vpsrlq	\$4,$T3,$T2
	vpand	$MASK,$T1,$T1		# 1
	vpand	0(%rcx),$T4,$T4		# .Lmask24
	vpsrlq	\$30,$T3,$T3
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	################################################################
	# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	# and P. Schwabe
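	#
	# instead of carrying every limb into the next after each
	# addition, a single pass of carries is performed per iteration,
	# ordered so that the h4 -> h0 wrap (multiply by 5, i.e. shift
	# by 2 and add) overlaps with the straight carries; limbs stay
	# at most a few bits above 26, which the next multiplication
	# tolerates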

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$D1,$H1		# h0 -> h1

	vpsrlq	\$26,$H4,$D0
	vpand	$MASK,$H4,$H4

	vpsrlq	\$26,$H1,$D1
	vpand	$MASK,$H1,$H1
	vpaddq	$D1,$H2,$H2		# h1 -> h2

	vpaddq	$D0,$H0,$H0
	vpsllq	\$2,$D0,$D0
	vpaddq	$D0,$H0,$H0		# h4 -> h0

	vpsrlq	\$26,$H2,$D2
	vpand	$MASK,$H2,$H2
	vpaddq	$D2,$H3,$H3		# h2 -> h3

	vpsrlq	\$26,$H0,$D0
	vpand	$MASK,$H0,$H0
	vpaddq	$D0,$H1,$H1		# h0 -> h1

	vpsrlq	\$26,$H3,$D3
	vpand	$MASK,$H3,$H3
	vpaddq	$D3,$H4,$H4		# h3 -> h4

	ja	.Loop_avx

.Lskip_loop_avx:
	################################################################
	# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	vpshufd	\$0x10,$D4,$D4		# r0^n, xx12 -> x1x2
	add	\$32,$len
	jnz	.Long_tail_avx

	vpaddq	$H2,$T2,$T2
	vpaddq	$H0,$T0,$T0
	vpaddq	$H1,$T1,$T1
	vpaddq	$H3,$T3,$T3
	vpaddq	$H4,$T4,$T4

.Long_tail_avx:
	vmovdqa	$H2,0x20(%r11)
	vmovdqa	$H0,0x00(%r11)
	vmovdqa	$H1,0x10(%r11)
	vmovdqa	$H3,0x30(%r11)
	vmovdqa	$H4,0x40(%r11)

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpshufd		\$0x10,`16*1-64`($ctx),$H2	# r1^n
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vpmuludq	$T3,$H2,$H0	# h3*r1
	vpaddq		$H0,$D4,$D4	# d4 += h3*r1
	vpshufd		\$0x10,`16*2-64`($ctx),$H3	# s1^n
	vpmuludq	$T2,$H2,$H1	# h2*r1
	vpaddq		$H1,$D3,$D3	# d3 += h2*r1
	vpshufd		\$0x10,`16*3-64`($ctx),$H4	# r2^n
	vpmuludq	$T1,$H2,$H0	# h1*r1
	vpaddq		$H0,$D2,$D2	# d2 += h1*r1
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpaddq		$H2,$D1,$D1	# d1 += h0*r1
	vpmuludq	$T4,$H3,$H3	# h4*s1
	vpaddq		$H3,$D0,$D0	# d0 += h4*s1

	vpshufd		\$0x10,`16*4-64`($ctx),$H2	# s2^n
	vpmuludq	$T2,$H4,$H1	# h2*r2
	vpaddq		$H1,$D4,$D4	# d4 += h2*r2
	vpmuludq	$T1,$H4,$H0	# h1*r2
	vpaddq		$H0,$D3,$D3	# d3 += h1*r2
	vpshufd		\$0x10,`16*5-64`($ctx),$H3	# r3^n
	vpmuludq	$T0,$H4,$H4	# h0*r2
	vpaddq		$H4,$D2,$D2	# d2 += h0*r2
	vpmuludq	$T4,$H2,$H1	# h4*s2
	vpaddq		$H1,$D1,$D1	# d1 += h4*s2
	vpshufd		\$0x10,`16*6-64`($ctx),$H4	# s3^n
	vpmuludq	$T3,$H2,$H2	# h3*s2
	vpaddq		$H2,$D0,$D0	# d0 += h3*s2

	vpmuludq	$T1,$H3,$H0	# h1*r3
	vpaddq		$H0,$D4,$D4	# d4 += h1*r3
	vpmuludq	$T0,$H3,$H3	# h0*r3
	vpaddq		$H3,$D3,$D3	# d3 += h0*r3
	vpshufd		\$0x10,`16*7-64`($ctx),$H2	# r4^n
	vpmuludq	$T4,$H4,$H1	# h4*s3
	vpaddq		$H1,$D2,$D2	# d2 += h4*s3
	vpshufd		\$0x10,`16*8-64`($ctx),$H3	# s4^n
	vpmuludq	$T3,$H4,$H0	# h3*s3
	vpaddq		$H0,$D1,$D1	# d1 += h3*s3
	vpmuludq	$T2,$H4,$H4	# h2*s3
	vpaddq		$H4,$D0,$D0	# d0 += h2*s3

	vpmuludq	$T0,$H2,$H2	# h0*r4
	vpaddq		$H2,$D4,$D4	# h4 = d4 + h0*r4
	vpmuludq	$T4,$H3,$H1	# h4*s4
	vpaddq		$H1,$D3,$D3	# h3 = d3 + h4*s4
	vpmuludq	$T3,$H3,$H0	# h3*s4
	vpaddq		$H0,$D2,$D2	# h2 = d2 + h3*s4
	vpmuludq	$T2,$H3,$H1	# h2*s4
	vpaddq		$H1,$D1,$D1	# h1 = d1 + h2*s4
	vpmuludq	$T1,$H3,$H3	# h1*s4
	vpaddq		$H3,$D0,$D0	# h0 = d0 + h1*s4

	jz	.Lshort_tail_avx

	vmovdqu	16*0($inp),$H0		# load input
	vmovdqu	16*1($inp),$H1

	vpsrldq	\$6,$H0,$H2		# splat input
	vpsrldq	\$6,$H1,$H3
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	vpsrlq	\$40,$H4,$H4		# 4
	vpsrlq	\$26,$H0,$H1
	vpand	$MASK,$H0,$H0		# 0
	vpsrlq	\$4,$H3,$H2
	vpand	$MASK,$H1,$H1		# 1
	vpsrlq	\$30,$H3,$H3
	vpand	$MASK,$H2,$H2		# 2
	vpand	$MASK,$H3,$H3		# 3
	vpor	32(%rcx),$H4,$H4	# padbit, yes, always

	vpshufd	\$0x32,`16*0-64`($ctx),$T4	# r0^n, 34xx -> x3x4
	vpaddq	0x00(%r11),$H0,$H0
	vpaddq	0x10(%r11),$H1,$H1
	vpaddq	0x20(%r11),$H2,$H2
	vpaddq	0x30(%r11),$H3,$H3
	vpaddq	0x40(%r11),$H4,$H4

	################################################################
	# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpaddq		$T0,$D0,$D0	# d0 += h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq		$T1,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpaddq		$T0,$D2,$D2	# d2 += h2*r0
	vpshufd		\$0x32,`16*1-64`($ctx),$T2	# r1^n
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq		$T1,$D3,$D3	# d3 += h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpaddq		$T4,$D4,$D4	# d4 += h4*r0

	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq		$T0,$D4,$D4	# d4 += h3*r1
	vpshufd		\$0x32,`16*2-64`($ctx),$T3	# s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpaddq		$T1,$D3,$D3	# d3 += h2*r1
	vpshufd		\$0x32,`16*3-64`($ctx),$T4	# r2
	vpmuludq	$H1,$T2,$T0	# h1*r1
	vpaddq		$T0,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq		$T2,$D1,$D1	# d1 += h0*r1
	vpmuludq	$H4,$T3,$T3	# h4*s1
	vpaddq		$T3,$D0,$D0	# d0 += h4*s1

	vpshufd		\$0x32,`16*4-64`($ctx),$T2	# s2
	vpmuludq	$H2,$T4,$T1	# h2*r2
	vpaddq		$T1,$D4,$D4	# d4 += h2*r2
	vpmuludq	$H1,$T4,$T0	# h1*r2
	vpaddq		$T0,$D3,$D3	# d3 += h1*r2
	vpshufd		\$0x32,`16*5-64`($ctx),$T3	# r3
	vpmuludq	$H0,$T4,$T4	# h0*r2
	vpaddq		$T4,$D2,$D2	# d2 += h0*r2
	vpmuludq	$H4,$T2,$T1	# h4*s2
	vpaddq		$T1,$D1,$D1	# d1 += h4*s2
	vpshufd		\$0x32,`16*6-64`($ctx),$T4	# s3
	vpmuludq	$H3,$T2,$T2	# h3*s2
	vpaddq		$T2,$D0,$D0	# d0 += h3*s2

	vpmuludq	$H1,$T3,$T0	# h1*r3
	vpaddq		$T0,$D4,$D4	# d4 += h1*r3
	vpmuludq	$H0,$T3,$T3	# h0*r3
	vpaddq		$T3,$D3,$D3	# d3 += h0*r3
	vpshufd		\$0x32,`16*7-64`($ctx),$T2	# r4
	vpmuludq	$H4,$T4,$T1	# h4*s3
	vpaddq		$T1,$D2,$D2	# d2 += h4*s3
	vpshufd		\$0x32,`16*8-64`($ctx),$T3	# s4
	vpmuludq	$H3,$T4,$T0	# h3*s3
	vpaddq		$T0,$D1,$D1	# d1 += h3*s3
	vpmuludq	$H2,$T4,$T4	# h2*s3
	vpaddq		$T4,$D0,$D0	# d0 += h2*s3

	vpmuludq	$H0,$T2,$T2	# h0*r4
	vpaddq		$T2,$D4,$D4	# d4 += h0*r4
	vpmuludq	$H4,$T3,$T1	# h4*s4
	vpaddq		$T1,$D3,$D3	# d3 += h4*s4
	vpmuludq	$H3,$T3,$T0	# h3*s4
	vpaddq		$T0,$D2,$D2	# d2 += h3*s4
	vpmuludq	$H2,$T3,$T1	# h2*s4
	vpaddq		$T1,$D1,$D1	# d1 += h2*s4
	vpmuludq	$H1,$T3,$T3	# h1*s4
	vpaddq		$T3,$D0,$D0	# d0 += h1*s4

.Lshort_tail_avx:
	################################################################
	# horizontal addition
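	#
	# each 128-bit $D register holds two independent accumulator
	# lanes; fold the high 64-bit lane into the low one to collapse
	# the 2-way parallelism before the final reduction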

	vpsrldq	\$8,$D4,$T4
	vpsrldq	\$8,$D3,$T3
	vpsrldq	\$8,$D1,$T1
	vpsrldq	\$8,$D0,$T0
	vpsrldq	\$8,$D2,$T2
	vpaddq	$T3,$D3,$D3
	vpaddq	$T4,$D4,$D4
	vpaddq	$T0,$D0,$D0
	vpaddq	$T1,$D1,$D1
	vpaddq	$T2,$D2,$D2

	################################################################
	# lazy reduction

	vpsrlq	\$26,$D3,$H3
	vpand	$MASK,$D3,$D3
	vpaddq	$H3,$D4,$D4		# h3 -> h4

	vpsrlq	\$26,$D0,$H0
	vpand	$MASK,$D0,$D0
	vpaddq	$H0,$D1,$D1		# h0 -> h1

	vpsrlq	\$26,$D4,$H4
	vpand	$MASK,$D4,$D4

	vpsrlq	\$26,$D1,$H1
	vpand	$MASK,$D1,$D1
	vpaddq	$H1,$D2,$D2		# h1 -> h2

	vpaddq	$H4,$D0,$D0
	vpsllq	\$2,$H4,$H4
	vpaddq	$H4,$D0,$D0		# h4 -> h0

	vpsrlq	\$26,$D2,$H2
	vpand	$MASK,$D2,$D2
	vpaddq	$H2,$D3,$D3		# h2 -> h3

	vpsrlq	\$26,$D0,$H0
	vpand	$MASK,$D0,$D0
	vpaddq	$H0,$D1,$D1		# h0 -> h1

	vpsrlq	\$26,$D3,$H3
	vpand	$MASK,$D3,$D3
	vpaddq	$H3,$D4,$D4		# h3 -> h4

	vmovd	$D0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd	$D1,`4*1-48-64`($ctx)
	vmovd	$D2,`4*2-48-64`($ctx)
	vmovd	$D3,`4*3-48-64`($ctx)
	vmovd	$D4,`4*4-48-64`($ctx)
___
$code.=<<___ if ($win64);
	vmovdqa	0x50(%r11),%xmm6
	vmovdqa	0x60(%r11),%xmm7
	vmovdqa	0x70(%r11),%xmm8
	vmovdqa	0x80(%r11),%xmm9
	vmovdqa	0x90(%r11),%xmm10
	vmovdqa	0xa0(%r11),%xmm11
	vmovdqa	0xb0(%r11),%xmm12
	vmovdqa	0xc0(%r11),%xmm13
	vmovdqa	0xd0(%r11),%xmm14
	vmovdqa	0xe0(%r11),%xmm15
	lea	0xf8(%r11),%rsp
.Ldo_avx_epilogue:
___
$code.=<<___ if (!$win64);
	lea	0x58(%r11),%rsp
___
$code.=<<___;
	vzeroupper
	ret
.size	poly1305_blocks_avx,.-poly1305_blocks_avx

.type	poly1305_emit_avx,\@function,3
.align	32
poly1305_emit_avx:
	cmpl	\$0,20($ctx)		# is_base2_26?
	je	.Lemit

	mov	0($ctx),%eax		# load hash value base 2^26
	mov	4($ctx),%ecx
	mov	8($ctx),%r8d
	mov	12($ctx),%r11d
	mov	16($ctx),%r10d

	shl	\$26,%rcx		# base 2^26 -> base 2^64
	mov	%r8,%r9
	shl	\$52,%r8
	add	%rcx,%rax
	shr	\$12,%r9
	add	%rax,%r8		# h0
	adc	\$0,%r9

	shl	\$14,%r11
	mov	%r10,%rax
	shr	\$24,%r10
	add	%r11,%r9
	shl	\$40,%rax
	add	%rax,%r9		# h1
	adc	\$0,%r10		# h2

	mov	%r10,%rax		# could be partially reduced, so reduce
	mov	%r10,%rcx
	and	\$3,%r10
	shr	\$2,%rax
	and	\$-4,%rcx
	add	%rcx,%rax
	add	%rax,%r8
	adc	\$0,%r9

	mov	%r8,%rax
	add	\$5,%r8			# compare to modulus
	mov	%r9,%rcx
	adc	\$0,%r9
	adc	\$0,%r10
	shr	\$2,%r10		# did 130-bit value overflow?
	cmovnz	%r8,%rax
	cmovnz	%r9,%rcx

	add	0($nonce),%rax		# accumulate nonce
	adc	8($nonce),%rcx
	mov	%rax,0($mac)		# write result
	mov	%rcx,8($mac)

	ret
.size	poly1305_emit_avx,.-poly1305_emit_avx
___

if ($avx>1) {
my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));
my $S4=$MASK;
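# $S4 aliases $MASK's register; whenever it is clobbered, $MASK is
# reloaded from .Lconst(%rip) before its next use.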

$code.=<<___;
.type	poly1305_blocks_avx2,\@function,4
.align	32
poly1305_blocks_avx2:
	mov	20($ctx),%r8d		# is_base2_26
	cmp	\$128,$len
	jae	.Lblocks_avx2
	test	%r8d,%r8d
	jz	.Lblocks

.Lblocks_avx2:
	and	\$-16,$len
	jz	.Lno_data_avx2

	vzeroupper

	test	%r8d,%r8d
	jz	.Lbase2_64_avx2

	test	\$63,$len
	jz	.Leven_avx2

	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lblocks_avx2_body:

	mov	$len,%r15		# reassign $len

	mov	0($ctx),$d1		# load hash value
	mov	8($ctx),$d2
	mov	16($ctx),$h2#d

	mov	24($ctx),$r0		# load r
	mov	32($ctx),$s1

	################################# base 2^26 -> base 2^64
	mov	$d1#d,$h0#d
	and	\$-1<<31,$d1
	mov	$d2,$r1			# borrow $r1
	mov	$d2#d,$h1#d
	and	\$-1<<31,$d2

	shr	\$6,$d1
	shl	\$52,$r1
	add	$d1,$h0
	shr	\$12,$h1
	shr	\$18,$d2
	add	$r1,$h0
	adc	$d2,$h1

	mov	$h2,$d1
	shl	\$40,$d1
	shr	\$24,$h2
	add	$d1,$h1
	adc	\$0,$h2			# can be partially reduced...

	mov	\$-4,$d2		# ... so reduce
	mov	$h2,$d1
	and	$h2,$d2
	shr	\$2,$d1
	and	\$3,$h2
	add	$d2,$d1			# =*5
	add	$d1,$h0
	adc	\$0,$h1

	mov	$s1,$r1
	mov	$s1,%rax
	shr	\$2,$s1
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2:
	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2
	sub	\$16,%r15

	call	__poly1305_block
	mov	$r1,%rax

	test	\$63,%r15
	jnz	.Lbase2_26_pre_avx2

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx2	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	mov	$h0,%rax
	mov	$h0,%rdx
	shr	\$52,$h0
	mov	$h1,$r0
	mov	$h1,$r1
	shr	\$26,%rdx
	and	\$0x3ffffff,%rax	# h[0]
	shl	\$12,$r0
	and	\$0x3ffffff,%rdx	# h[1]
	shr	\$14,$h1
	or	$r0,$h0
	shl	\$24,$h2
	and	\$0x3ffffff,$h0		# h[2]
	shr	\$40,$r1
	and	\$0x3ffffff,$h1		# h[3]
	or	$r1,$h2			# h[4]

	test	%r15,%r15
	jz	.Lstore_base2_26_avx2

	vmovd	%rax#d,%x#$H0
	vmovd	%rdx#d,%x#$H1
	vmovd	$h0#d,%x#$H2
	vmovd	$h1#d,%x#$H3
	vmovd	$h2#d,%x#$H4
	jmp	.Lproceed_avx2

.align	32
.Lstore_base2_64_avx2:
	mov	$h0,0($ctx)
	mov	$h1,8($ctx)
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed
	jmp	.Ldone_avx2

.align	16
.Lstore_base2_26_avx2:
	mov	%rax#d,0($ctx)		# store hash value base 2^26
	mov	%rdx#d,4($ctx)
	mov	$h0#d,8($ctx)
	mov	$h1#d,12($ctx)
	mov	$h2#d,16($ctx)
.align	16
.Ldone_avx2:
	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rsp
.Lno_data_avx2:
.Lblocks_avx2_epilogue:
	ret

.align	32
.Lbase2_64_avx2:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
.Lbase2_64_avx2_body:

	mov	$len,%r15		# reassign $len

	mov	24($ctx),$r0		# load r
	mov	32($ctx),$s1

	mov	0($ctx),$h0		# load hash value
	mov	8($ctx),$h1
	mov	16($ctx),$h2#d

	mov	$s1,$r1
	mov	$s1,%rax
	shr	\$2,$s1
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

	test	\$63,$len
	jz	.Linit_avx2

.Lbase2_64_pre_avx2:
	add	0($inp),$h0		# accumulate input
	adc	8($inp),$h1
	lea	16($inp),$inp
	adc	$padbit,$h2
	sub	\$16,%r15

	call	__poly1305_block
	mov	$r1,%rax

	test	\$63,%r15
	jnz	.Lbase2_64_pre_avx2

.Linit_avx2:
	################################# base 2^64 -> base 2^26
	mov	$h0,%rax
	mov	$h0,%rdx
	shr	\$52,$h0
	mov	$h1,$d1
	mov	$h1,$d2
	shr	\$26,%rdx
	and	\$0x3ffffff,%rax	# h[0]
	shl	\$12,$d1
	and	\$0x3ffffff,%rdx	# h[1]
	shr	\$14,$h1
	or	$d1,$h0
	shl	\$24,$h2
	and	\$0x3ffffff,$h0		# h[2]
	shr	\$40,$d2
	and	\$0x3ffffff,$h1		# h[3]
	or	$d2,$h2			# h[4]

	vmovd	%rax#d,%x#$H0
	vmovd	%rdx#d,%x#$H1
	vmovd	$h0#d,%x#$H2
	vmovd	$h1#d,%x#$H3
	vmovd	$h2#d,%x#$H4
	movl	\$1,20($ctx)		# set is_base2_26

	call	__poly1305_init_avx

.Lproceed_avx2:
	mov	%r15,$len

	mov	0(%rsp),%r15
	mov	8(%rsp),%r14
	mov	16(%rsp),%r13
	mov	24(%rsp),%r12
	mov	32(%rsp),%rbp
	mov	40(%rsp),%rbx
	lea	48(%rsp),%rax
	lea	48(%rsp),%rsp
.Lbase2_64_avx2_epilogue:
	jmp	.Ldo_avx2

.align	32
.Leven_avx2:
	vmovd	4*0($ctx),%x#$H0	# load hash value base 2^26
	vmovd	4*1($ctx),%x#$H1
	vmovd	4*2($ctx),%x#$H2
	vmovd	4*3($ctx),%x#$H3
	vmovd	4*4($ctx),%x#$H4

.Ldo_avx2:
___
$code.=<<___ if (!$win64);
	lea	-8(%rsp),%r11
	sub	\$0x128,%rsp
___
$code.=<<___ if ($win64);
	lea	-0xf8(%rsp),%r11
	sub	\$0x1c8,%rsp
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
.Ldo_avx2_body:
___
$code.=<<___;
	lea	48+64($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*0-64`($ctx),%x#$T2
	and	\$-512,%rsp
	vmovdqu	`16*1-64`($ctx),%x#$T3
	vmovdqu	`16*2-64`($ctx),%x#$T4
	vmovdqu	`16*3-64`($ctx),%x#$D0
	vmovdqu	`16*4-64`($ctx),%x#$D1
	vmovdqu	`16*5-64`($ctx),%x#$D2
	vmovdqu	`16*6-64`($ctx),%x#$D3
	vpermq	\$0x15,$T2,$T2		# 00003412 -> 12343434
	vmovdqu	`16*7-64`($ctx),%x#$D4
	vpermq	\$0x15,$T3,$T3
	vpshufd	\$0xc8,$T2,$T2		# 12343434 -> 14243444
	vmovdqu	`16*8-64`($ctx),%x#$MASK
	vpermq	\$0x15,$T4,$T4
	vpshufd	\$0xc8,$T3,$T3
	vmovdqa	$T2,0x00(%rsp)
	vpermq	\$0x15,$D0,$D0
	vpshufd	\$0xc8,$T4,$T4
	vmovdqa	$T3,0x20(%rsp)
	vpermq	\$0x15,$D1,$D1
	vpshufd	\$0xc8,$D0,$D0
	vmovdqa	$T4,0x40(%rsp)
	vpermq	\$0x15,$D2,$D2
	vpshufd	\$0xc8,$D1,$D1
	vmovdqa	$D0,0x60(%rsp)
	vpermq	\$0x15,$D3,$D3
	vpshufd	\$0xc8,$D2,$D2
	vmovdqa	$D1,0x80(%rsp)
	vpermq	\$0x15,$D4,$D4
	vpshufd	\$0xc8,$D3,$D3
	vmovdqa	$D2,0xa0(%rsp)
	vpermq	\$0x15,$MASK,$MASK
	vpshufd	\$0xc8,$D4,$D4
	vmovdqa	$D3,0xc0(%rsp)
	vpshufd	\$0xc8,$MASK,$MASK
	vmovdqa	$D4,0xe0(%rsp)
	vmovdqa	$MASK,0x100(%rsp)
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	################################################################
	# load input
	vmovdqu	16*0($inp),%x#$T0
	vmovdqu	16*1($inp),%x#$T1
	vinserti128	\$1,16*2($inp),$T0,$T0
	vinserti128	\$1,16*3($inp),$T1,$T1
	lea	16*4($inp),$inp

	vpsrldq	\$6,$T0,$T2		# splat input
	vpsrldq	\$6,$T1,$T3
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T3,$T2,$T2	# 2:3
	vpunpcklqdq	$T1,$T0,$T0	# 0:1

	vpsrlq	\$30,$T2,$T3
	vpsrlq	\$4,$T2,$T2
	vpsrlq	\$26,$T0,$T1
	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	lea	0x90(%rsp),%rax		# size optimization
	vpaddq	$H2,$T2,$H2		# accumulate input
	sub	\$64,$len
	jz	.Ltail_avx2
	jmp	.Loop_avx2

.align	32
.Loop_avx2:
	################################################################
	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
	#   \________/\________/
	################################################################
	#vpaddq	$H2,$T2,$H2		# accumulate input
	vpaddq	$H0,$T0,$H0
	vmovdqa	`32*0`(%rsp),$T0	# r0^4
	vpaddq	$H1,$T1,$H1
	vmovdqa	`32*1`(%rsp),$T1	# r1^4
	vpaddq	$H3,$T3,$H3
	vmovdqa	`32*3`(%rsp),$T2	# r2^4
	vpaddq	$H4,$T4,$H4
	vmovdqa	`32*6-0x90`(%rax),$T3	# s3^4
	vmovdqa	`32*8-0x90`(%rax),$S4	# s4^4

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" the first one available,
	# pull the corresponding operations up, so it's
	#
	# d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
	# d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
	# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
1700 | ||
1701 | vpmuludq $H2,$T0,$D2 # d2 = h2*r0 | |
1702 | vpmuludq $H2,$T1,$D3 # d3 = h2*r1 | |
1703 | vpmuludq $H2,$T2,$D4 # d4 = h2*r2 | |
1704 | vpmuludq $H2,$T3,$D0 # d0 = h2*s3 | |
1705 | vpmuludq $H2,$S4,$D1 # d1 = h2*s4 | |
1706 | ||
1707 | vpmuludq $H0,$T1,$T4 # h0*r1 | |
1708 | vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp | |
1709 | vpaddq $T4,$D1,$D1 # d1 += h0*r1 | |
1710 | vpaddq $H2,$D2,$D2 # d2 += h1*r1 | |
1711 | vpmuludq $H3,$T1,$T4 # h3*r1 | |
1712 | vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1 | |
1713 | vpaddq $T4,$D4,$D4 # d4 += h3*r1 | |
1714 | vpaddq $H2,$D0,$D0 # d0 += h4*s1 | |
1715 | vmovdqa `32*4-0x90`(%rax),$T1 # s2 | |
1716 | ||
1717 | vpmuludq $H0,$T0,$T4 # h0*r0 | |
1718 | vpmuludq $H1,$T0,$H2 # h1*r0 | |
1719 | vpaddq $T4,$D0,$D0 # d0 += h0*r0 | |
1720 | vpaddq $H2,$D1,$D1 # d1 += h1*r0 | |
1721 | vpmuludq $H3,$T0,$T4 # h3*r0 | |
1722 | vpmuludq $H4,$T0,$H2 # h4*r0 | |
1723 | vmovdqu 16*0($inp),%x#$T0 # load input | |
1724 | vpaddq $T4,$D3,$D3 # d3 += h3*r0 | |
1725 | vpaddq $H2,$D4,$D4 # d4 += h4*r0 | |
1726 | vinserti128 \$1,16*2($inp),$T0,$T0 | |
1727 | ||
1728 | vpmuludq $H3,$T1,$T4 # h3*s2 | |
1729 | vpmuludq $H4,$T1,$H2 # h4*s2 | |
1730 | vmovdqu 16*1($inp),%x#$T1 | |
1731 | vpaddq $T4,$D0,$D0 # d0 += h3*s2 | |
1732 | vpaddq $H2,$D1,$D1 # d1 += h4*s2 | |
1733 | vmovdqa `32*5-0x90`(%rax),$H2 # r3 | |
1734 | vpmuludq $H1,$T2,$T4 # h1*r2 | |
1735 | vpmuludq $H0,$T2,$T2 # h0*r2 | |
1736 | vpaddq $T4,$D3,$D3 # d3 += h1*r2 | |
1737 | vpaddq $T2,$D2,$D2 # d2 += h0*r2 | |
1738 | vinserti128 \$1,16*3($inp),$T1,$T1 | |
1739 | lea 16*4($inp),$inp | |
1740 | ||
1741 | vpmuludq $H1,$H2,$T4 # h1*r3 | |
1742 | vpmuludq $H0,$H2,$H2 # h0*r3 | |
1743 | vpsrldq \$6,$T0,$T2 # splat input | |
1744 | vpaddq $T4,$D4,$D4 # d4 += h1*r3 | |
1745 | vpaddq $H2,$D3,$D3 # d3 += h0*r3 | |
1746 | vpmuludq $H3,$T3,$T4 # h3*s3 | |
1747 | vpmuludq $H4,$T3,$H2 # h4*s3 | |
1748 | vpsrldq \$6,$T1,$T3 | |
1749 | vpaddq $T4,$D1,$D1 # d1 += h3*s3 | |
1750 | vpaddq $H2,$D2,$D2 # d2 += h4*s3 | |
1751 | vpunpckhqdq $T1,$T0,$T4 # 4 | |
1752 | ||
1753 | vpmuludq $H3,$S4,$H3 # h3*s4 | |
1754 | vpmuludq $H4,$S4,$H4 # h4*s4 | |
1755 | vpunpcklqdq $T1,$T0,$T0 # 0:1 | |
1756 | vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 | |
1757 | vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 | |
1758 | vpunpcklqdq $T3,$T2,$T3 # 2:3 | |
1759 | vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4 | |
1760 | vpmuludq $H1,$S4,$H0 # h1*s4 | |
1761 | vmovdqa 64(%rcx),$MASK # .Lmask26 | |
1762 | vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 | |
1763 | vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 | |
1764 | ||
	################################################################
	# lazy reduction (interleaved with tail of input splat)

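	# The chain below is the "lazy" carry pass: each step reduces one
	# limb to 26 bits and adds the carry into the next limb, with the
	# carry out of h4 folded back into h0 as carry*5 (2^130 = 5 mod p),
	# computed as d + 4*d via the vpsllq \$2. A hedged scalar model of
	# the exact order used below (hypothetical variables, not emitted):
	#
	#	\$d3 = \$h3 >> 26; \$h3 &= 0x3ffffff; \$h4 += \$d3;   # h3 -> h4
	#	\$d0 = \$h0 >> 26; \$h0 &= 0x3ffffff; \$h1 += \$d0;   # h0 -> h1
	#	\$d4 = \$h4 >> 26; \$h4 &= 0x3ffffff;
	#	\$d1 = \$h1 >> 26; \$h1 &= 0x3ffffff; \$h2 += \$d1;   # h1 -> h2
	#	\$h0 += \$d4 + (\$d4 << 2);                           # h4 -> h0, *5
	#	\$d2 = \$h2 >> 26; \$h2 &= 0x3ffffff; \$h3 += \$d2;   # h2 -> h3
	#	\$d0 = \$h0 >> 26; \$h0 &= 0x3ffffff; \$h1 += \$d0;   # h0 -> h1
	#	\$d3 = \$h3 >> 26; \$h3 &= 0x3ffffff; \$h4 += \$d3;   # h3 -> h4
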
	vpsrlq		\$26,$H3,$D3
	vpand		$MASK,$H3,$H3
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	vpsrlq		\$26,$H0,$D0
	vpand		$MASK,$H0,$H0
	vpaddq		$D0,$D1,$H1		# h0 -> h1

	vpsrlq		\$26,$H4,$D4
	vpand		$MASK,$H4,$H4

	vpsrlq		\$4,$T3,$T2

	vpsrlq		\$26,$H1,$D1
	vpand		$MASK,$H1,$H1
	vpaddq		$D1,$H2,$H2		# h1 -> h2

	vpaddq		$D4,$H0,$H0
	vpsllq		\$2,$D4,$D4
	vpaddq		$D4,$H0,$H0		# h4 -> h0

	vpand		$MASK,$T2,$T2		# 2
	vpsrlq		\$26,$T0,$T1

	vpsrlq		\$26,$H2,$D2
	vpand		$MASK,$H2,$H2
	vpaddq		$D2,$H3,$H3		# h2 -> h3

	vpaddq		$T2,$H2,$H2		# modulo-scheduled
	vpsrlq		\$30,$T3,$T3

	vpsrlq		\$26,$H0,$D0
	vpand		$MASK,$H0,$H0
	vpaddq		$D0,$H1,$H1		# h0 -> h1

	vpsrlq		\$40,$T4,$T4		# 4

	vpsrlq		\$26,$H3,$D3
	vpand		$MASK,$H3,$H3
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	vpand		$MASK,$T0,$T0		# 0
	vpand		$MASK,$T1,$T1		# 1
	vpand		$MASK,$T3,$T3		# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always

	sub		\$64,$len
	jnz		.Loop_avx2

	.byte	0x66,0x90			# 2-byte nop (padding)
.Ltail_avx2:
	################################################################
	# while the above multiplications were by r^4 in all lanes, in the
	# last iteration the least significant lane is multiplied by r^4
	# and the most significant one by r, so this is a copy of the
	# above, except that references to the precomputed table are
	# displaced by 4...

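	# Rationale: the vector loop keeps four interleaved accumulators,
	# so after the final 64-byte iteration the hash still needs
	#
	#	H = lane0*r^4 + lane1*r^3 + lane2*r^2 + lane3*r
	#
	# and the "+4"-displaced table entries are laid out so that each
	# lane picks up its remaining power, (r^4,r^3,r^2,r), instead of
	# the r^4 broadcast; one more multiply pass plus the horizontal
	# addition further below then produces H.
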
	#vpaddq		$H2,$T2,$H2		# accumulate input
	vpaddq		$H0,$T0,$H0
	vmovdqu		`32*0+4`(%rsp),$T0	# r0^4
	vpaddq		$H1,$T1,$H1
	vmovdqu		`32*1+4`(%rsp),$T1	# r1^4
	vpaddq		$H3,$T3,$H3
	vmovdqu		`32*3+4`(%rsp),$T2	# r2^4
	vpaddq		$H4,$T4,$H4
	vmovdqu		`32*6+4-0x90`(%rax),$T3	# s3^4
	vmovdqu		`32*8+4-0x90`(%rax),$S4	# s4^4

	vpmuludq	$H2,$T0,$D2		# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3		# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4		# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0		# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1		# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4		# h0*r1
	vpmuludq	$H1,$T1,$H2		# h1*r1
	vpaddq		$T4,$D1,$D1		# d1 += h0*r1
	vpaddq		$H2,$D2,$D2		# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4		# h3*r1
	vpmuludq	`32*2+4`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4		# d4 += h3*r1
	vpaddq		$H2,$D0,$D0		# d0 += h4*s1

	vpmuludq	$H0,$T0,$T4		# h0*r0
	vpmuludq	$H1,$T0,$H2		# h1*r0
	vpaddq		$T4,$D0,$D0		# d0 += h0*r0
	vmovdqu		`32*4+4-0x90`(%rax),$T1	# s2
	vpaddq		$H2,$D1,$D1		# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4		# h3*r0
	vpmuludq	$H4,$T0,$H2		# h4*r0
	vpaddq		$T4,$D3,$D3		# d3 += h3*r0
	vpaddq		$H2,$D4,$D4		# d4 += h4*r0

	vpmuludq	$H3,$T1,$T4		# h3*s2
	vpmuludq	$H4,$T1,$H2		# h4*s2
	vpaddq		$T4,$D0,$D0		# d0 += h3*s2
	vpaddq		$H2,$D1,$D1		# d1 += h4*s2
	vmovdqu		`32*5+4-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4		# h1*r2
	vpmuludq	$H0,$T2,$T2		# h0*r2
	vpaddq		$T4,$D3,$D3		# d3 += h1*r2
	vpaddq		$T2,$D2,$D2		# d2 += h0*r2

	vpmuludq	$H1,$H2,$T4		# h1*r3
	vpmuludq	$H0,$H2,$H2		# h0*r3
	vpaddq		$T4,$D4,$D4		# d4 += h1*r3
	vpaddq		$H2,$D3,$D3		# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4		# h3*s3
	vpmuludq	$H4,$T3,$H2		# h4*s3
	vpaddq		$T4,$D1,$D1		# d1 += h3*s3
	vpaddq		$H2,$D2,$D2		# d2 += h4*s3

	vpmuludq	$H3,$S4,$H3		# h3*s4
	vpmuludq	$H4,$S4,$H4		# h4*s4
	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
	vpmuludq	`32*7+4-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0		# h1*s4
	vmovdqa		64(%rcx),$MASK		# .Lmask26
	vpaddq		$H4,$D4,$H4		# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0		# h0 = d0 + h1*s4

	################################################################
	# horizontal addition

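	# Each of the four 64-bit lanes holds a partial limb sum that must
	# be folded into lane 0. vpsrldq \$8 adds the neighbouring qword
	# within each 128-bit half, vpermq \$0x2 then brings qword 2 down
	# to qword 0, and the second vpaddq completes the sum. In scalar
	# terms (hedged model, hypothetical lane variables):
	#
	#	\$h = \$lane0 + \$lane1 + \$lane2 + \$lane3;
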
	vpsrldq		\$8,$D1,$T1
	vpsrldq		\$8,$H2,$T2
	vpsrldq		\$8,$H3,$T3
	vpsrldq		\$8,$H4,$T4
	vpsrldq		\$8,$H0,$T0
	vpaddq		$T1,$D1,$D1
	vpaddq		$T2,$H2,$H2
	vpaddq		$T3,$H3,$H3
	vpaddq		$T4,$H4,$H4
	vpaddq		$T0,$H0,$H0

	vpermq		\$0x2,$H3,$T3
	vpermq		\$0x2,$H4,$T4
	vpermq		\$0x2,$H0,$T0
	vpermq		\$0x2,$D1,$T1
	vpermq		\$0x2,$H2,$T2
	vpaddq		$T3,$H3,$H3
	vpaddq		$T4,$H4,$H4
	vpaddq		$T0,$H0,$H0
	vpaddq		$T1,$D1,$D1
	vpaddq		$T2,$H2,$H2

	################################################################
	# lazy reduction

	vpsrlq		\$26,$H3,$D3
	vpand		$MASK,$H3,$H3
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	vpsrlq		\$26,$H0,$D0
	vpand		$MASK,$H0,$H0
	vpaddq		$D0,$D1,$H1		# h0 -> h1

	vpsrlq		\$26,$H4,$D4
	vpand		$MASK,$H4,$H4

	vpsrlq		\$26,$H1,$D1
	vpand		$MASK,$H1,$H1
	vpaddq		$D1,$H2,$H2		# h1 -> h2

	vpaddq		$D4,$H0,$H0
	vpsllq		\$2,$D4,$D4
	vpaddq		$D4,$H0,$H0		# h4 -> h0

	vpsrlq		\$26,$H2,$D2
	vpand		$MASK,$H2,$H2
	vpaddq		$D2,$H3,$H3		# h2 -> h3

	vpsrlq		\$26,$H0,$D0
	vpand		$MASK,$H0,$H0
	vpaddq		$D0,$H1,$H1		# h0 -> h1

	vpsrlq		\$26,$H3,$D3
	vpand		$MASK,$H3,$H3
	vpaddq		$D3,$H4,$H4		# h3 -> h4

	vmovd		%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd		%x#$H1,`4*1-48-64`($ctx)
	vmovd		%x#$H2,`4*2-48-64`($ctx)
	vmovd		%x#$H3,`4*3-48-64`($ctx)
	vmovd		%x#$H4,`4*4-48-64`($ctx)
___
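# A note on "partially reduced": the five 26-bit limbs just stored may
# still encode a value marginally above 2^130-5; the final reduction is
# deferred to poly1305_emit. The stored limbs stand for the following
# value (hedged Perl illustration with hypothetical $h0..$h4, not part
# of the build):
#
#	use Math::BigInt;
#	my $h = Math::BigInt->new(0);
#	$h = ($h << 26) + $_ for ($h4, $h3, $h2, $h1, $h0);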
$code.=<<___	if ($win64);
	vmovdqa		0x50(%r11),%xmm6
	vmovdqa		0x60(%r11),%xmm7
	vmovdqa		0x70(%r11),%xmm8
	vmovdqa		0x80(%r11),%xmm9
	vmovdqa		0x90(%r11),%xmm10
	vmovdqa		0xa0(%r11),%xmm11
	vmovdqa		0xb0(%r11),%xmm12
	vmovdqa		0xc0(%r11),%xmm13
	vmovdqa		0xd0(%r11),%xmm14
	vmovdqa		0xe0(%r11),%xmm15
	lea		0xf8(%r11),%rsp
.Ldo_avx2_epilogue:
___
$code.=<<___	if (!$win64);
	lea		8(%r11),%rsp
___
$code.=<<___;
	vzeroupper
	ret
.size	poly1305_blocks_avx2,.-poly1305_blocks_avx2
___
}
$code.=<<___;
.align	64
.Lconst:
.Lmask24:
.long	0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.L129:
.long	1<<24,0,1<<24,0,1<<24,0,1<<24,0
.Lmask26:
.long	0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.Lfive:
.long	5,0,5,0,5,0,5,0
___
}
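
# A note on the constants: .Lmask26 is the radix mask 2^26-1, .Lfive is
# the reduction multiplier (2^130 = 5 mod 2^130-5), and .L129 holds
# 1<<24 because the 2^128 pad bit appended to every full 16-byte block
# (which makes each block a 129-bit number, hence the label) lands in
# bit 24 of the fifth 26-bit limb: 128 - 4*26 = 24. That is the value
# the "vpor 32(%rcx),$T4,$T4 # padbit" above ORs in, .L129 being at
# offset 32 from .Lconst. Quick sanity check (illustrative Perl, not
# part of the build):
#
#	printf "%d\n",   128 - 4*26;	# 24
#	printf "0x%x\n", (1<<26) - 1;	# 0x3ffffff == .Lmask26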

$code.=<<___;
.asciz	"Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
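# The hard-coded offsets used below index the Win64 CONTEXT structure
# (per the public winnt.h layout, stated here for reference): 120 = Rax,
# 144 = Rbx, 152 = Rsp, 160 = Rbp, 168 = Rsi, 176 = Rdi, 208 = R11,
# 216/224/232/240 = R12..R15, 248 = Rip, and 512 = start of the
# Xmm6..Xmm15 save area; so restoring a register amounts to e.g.
#
#	mov	%rbx,144($context)	# context->Rbx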
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler

.type	avx_handler,\@abi-omnipotent
.align	16
avx_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
	mov	208($context),%rax	# pull context->R11

	lea	0x50(%rax),%rsi		# saved %xmm6 in the frame
	lea	0xf8(%rax),%rax		# post-epilogue stack pointer
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx		# 10 xmm registers = 20 qwords
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	avx_handler,.-avx_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_poly1305_init
	.rva	.LSEH_end_poly1305_init
	.rva	.LSEH_info_poly1305_init

	.rva	.LSEH_begin_poly1305_blocks
	.rva	.LSEH_end_poly1305_blocks
	.rva	.LSEH_info_poly1305_blocks

	.rva	.LSEH_begin_poly1305_emit
	.rva	.LSEH_end_poly1305_emit
	.rva	.LSEH_info_poly1305_emit
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.Lbase2_64_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1

	.rva	.Lbase2_64_avx
	.rva	.Leven_avx
	.rva	.LSEH_info_poly1305_blocks_avx_2

	.rva	.Leven_avx
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1

	.rva	.Lbase2_64_avx2
	.rva	.Leven_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2

	.rva	.Leven_avx2
	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_poly1305_init:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
___
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
___
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.byte	9,0,0,0
	.rva	avx_handler
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
___
}

foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;	# evaluate backticked expressions, e.g. 32*3 -> 96
	s/%r([a-z]+)#d/%e$1/g;		# %rax#d -> %eax, etc.
	s/%r([0-9]+)#d/%r$1d/g;		# %r8#d  -> %r8d, etc.
	s/%x#%y/%x/g;			# %x#%ymmN -> %xmmN, select xmm form

	print $_,"\n";
}
close STDOUT;