#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for ARMv8.
#
# June 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone.
#
#		IALU/gcc-4.9	NEON
#
# Apple A7	1.86/+5%	0.72
# Cortex-A53	2.63/+58%	1.47
# Cortex-A57	2.70/+7%	1.14
# Denver	1.39/+50%	1.18(*)
# X-Gene	2.00/+68%	2.19
#
# (*) the estimate based on resource availability is below 1.0,
#     i.e. the measured result is worse than expected, presumably
#     because the binary translator is not almighty;

$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
my ($mac,$nonce)=($inp,$len);

my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));

$code.=<<___;
#include "arm_arch.h"

.text

// forward "declarations" are required for Apple
.extern	OPENSSL_armcap_P
.globl	poly1305_blocks
.globl	poly1305_emit

.globl	poly1305_init
.type	poly1305_init,%function
.align	5
poly1305_init:
	cmp	$inp,xzr
	stp	xzr,xzr,[$ctx]		// zero hash value
	stp	xzr,xzr,[$ctx,#16]	// [along with is_base2_26]

	csel	x0,xzr,x0,eq
	b.eq	.Lno_key

#ifdef	__ILP32__
	ldrsw	$t1,.LOPENSSL_armcap_P
#else
	ldr	$t1,.LOPENSSL_armcap_P
#endif
	adr	$t0,.LOPENSSL_armcap_P

	ldp	$r0,$r1,[$inp]		// load key
	mov	$s1,#0xfffffffc0fffffff
	movk	$s1,#0x0fff,lsl#48
	ldr	w17,[$t0,$t1]
#ifdef	__ARMEB__
	rev	$r0,$r0			// flip bytes
	rev	$r1,$r1
#endif
	and	$r0,$r0,$s1		// &=0ffffffc0fffffff
	and	$s1,$s1,#-4
	and	$r1,$r1,$s1		// &=0ffffffc0ffffffc
	stp	$r0,$r1,[$ctx,#32]	// save key value

	tst	w17,#ARMV7_NEON

	adr	$d0,poly1305_blocks
	adr	$r0,poly1305_blocks_neon
	adr	$d1,poly1305_emit
	adr	$r1,poly1305_emit_neon

	csel	$d0,$d0,$r0,eq
	csel	$d1,$d1,$r1,eq

	stp	$d0,$d1,[$len]

	mov	x0,#1
.Lno_key:
	ret
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,%function
.align	5
poly1305_blocks:
	ands	$len,$len,#-16
	b.eq	.Lno_data

	ldp	$h0,$h1,[$ctx]		// load hash value
	ldp	$r0,$r1,[$ctx,#32]	// load key value
	ldr	$h2,[$ctx,#16]
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
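	// [clamping forced r1 to be 0 mod 4, so s1 = 5*r1/4 is exact;
	// using s1 in place of r1 against the 2^64-weight limb folds the
	// 2^128-weight partial products back to weight 1, since
	// 2^130 == 5 and hence 2^128 == 5/4 (mod 2^130-5)]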
	b	.Loop

.align	5
.Loop:
	ldp	$t0,$t1,[$inp],#16	// load input
	sub	$len,$len,#16
#ifdef	__ARMEB__
	rev	$t0,$t0
	rev	$t1,$t1
#endif
	adds	$h0,$h0,$t0		// accumulate input
	adcs	$h1,$h1,$t1

	mul	$d0,$h0,$r0		// h0*r0
	adc	$h2,$h2,$padbit
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

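	// [bits at weight 2^130 and above fold back in multiplied by 5:
	// (d2 & -4) + (d2 >> 2) = 5*(d2 >> 2)]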
	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adc	$h1,$d1,xzr

	cbnz	$len,.Loop

	stp	$h0,$h1,[$ctx]		// store hash value
	str	$h2,[$ctx,#16]

.Lno_data:
	ret
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,%function
.align	5
poly1305_emit:
	ldp	$h0,$h1,[$ctx]		// load hash base 2^64
	ldr	$h2,[$ctx,#16]
	ldp	$t0,$t1,[$nonce]	// load nonce

	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef	__ARMEB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef	__ARMEB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit,.-poly1305_emit
___
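
# A minimal Math::BigInt reference model of the scalar code above -- an
# illustrative cross-check only, with ad hoc names; nothing in this
# module calls it, and it assumes full 16-byte blocks (padbit==1).
# Each .Loop iteration computes h = (h + inp[i] + 2^128)*r mod 2^130-5,
# and poly1305_emit picks the canonical residue before adding the nonce
# modulo 2^128.
use Math::BigInt;

sub poly1305_ref {
    my ($key,$msg,$nonce) = @_;	# 32-byte key, 16*n-byte msg, 16-byte nonce
    my $le = sub { Math::BigInt->from_hex(unpack("H*",scalar reverse $_[0])) };
    my $p  = (Math::BigInt->new(1) << 130) - 5;
    my $r  = $le->(substr($key,0,16)) &		# same clamping as poly1305_init
	     Math::BigInt->from_hex("0ffffffc0ffffffc0ffffffc0fffffff");
    my $h  = Math::BigInt->new(0);
    for (my $i=0; $i<length($msg); $i+=16) {
	my $m = $le->(substr($msg,$i,16)) + (Math::BigInt->new(1) << 128);
	$h = ($h + $m) * $r % $p;		# one .Loop iteration
    }
    $h = ($h + $le->($nonce)) % (Math::BigInt->new(1) << 128);
    my $hex = substr(($h + (Math::BigInt->new(1) << 128))->as_hex, 3);
    return scalar reverse pack("H*",$hex);	# 16-byte little-endian tag
}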
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
my ($T0,$T1,$MASK) = map("v$_",(29..31));

my ($in2,$zeros)=("x16","x17");
my $is_base2_26 = $zeros;		# borrow
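
# Another ad hoc sketch, again unused by the module: the base 2^64 ->
# base 2^26 conversion that poly1305_splat and the .Ldo_neon lane
# packing below implement with ubfx/extr, spelled out in plain Perl
# (assumes a 64-bit perl).  Input is a 130-bit value in three 64-bit
# words, output is five 26-bit limbs.
sub base2_26_ref {
    my ($h0,$h1,$h2) = @_;
    return ( $h0	& 0x3ffffff,			# bits   0..25
	    ($h0 >> 26)	& 0x3ffffff,			# bits  26..51
	    ($h0 >> 52)	| (($h1 & 0x3fff) << 12),	# bits  52..77
	    ($h1 >> 14)	& 0x3ffffff,			# bits  78..103
	    ($h1 >> 40)	| (($h2 & 3) << 24) );		# bits 104..129
}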
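# And a sketch of the lazy reduction used by .Loop_neon below: limbs may
# grow a few bits past 2^26 between multiplications, and one interleaved
# carry sweep brings them back, folding the carry out of the top limb in
# at the bottom as c + 4*c, because 2^130 == 5 (mod 2^130-5).
sub lazy_reduce_ref {
    my @h = @_;			# five limbs, possibly wider than 26 bits
    my $c;
    $c = $h[3] >> 26; $h[3] &= 0x3ffffff; $h[4] += $c;		   # h3 -> h4
    $c = $h[0] >> 26; $h[0] &= 0x3ffffff; $h[1] += $c;		   # h0 -> h1
    $c = $h[4] >> 26; $h[4] &= 0x3ffffff; $h[0] += $c + ($c << 2); # h4 -> h0
    $c = $h[1] >> 26; $h[1] &= 0x3ffffff; $h[2] += $c;		   # h1 -> h2
    $c = $h[2] >> 26; $h[2] &= 0x3ffffff; $h[3] += $c;		   # h2 -> h3
    $c = $h[0] >> 26; $h[0] &= 0x3ffffff; $h[1] += $c;		   # h0 -> h1
    $c = $h[3] >> 26; $h[3] &= 0x3ffffff; $h[4] += $c;		   # h3 -> h4
    return @h;
}
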
$code.=<<___;
.type	poly1305_mult,%function
.align	5
poly1305_mult:
	mul	$d0,$h0,$r0		// h0*r0
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adc	$h1,$d1,xzr

	ret
.size	poly1305_mult,.-poly1305_mult

.type	poly1305_splat,%function
.align	5
poly1305_splat:
	and	x12,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x13,$h0,#26,#26
	extr	x14,$h1,$h0,#52
	and	x14,x14,#0x03ffffff
	ubfx	x15,$h1,#14,#26
	extr	x16,$h2,$h1,#40

	str	w12,[$ctx,#16*0]	// r0
	add	w12,w13,w13,lsl#2	// r1*5
	str	w13,[$ctx,#16*1]	// r1
	add	w13,w14,w14,lsl#2	// r2*5
	str	w12,[$ctx,#16*2]	// s1
	str	w14,[$ctx,#16*3]	// r2
	add	w14,w15,w15,lsl#2	// r3*5
	str	w13,[$ctx,#16*4]	// s2
	str	w15,[$ctx,#16*5]	// r3
	add	w15,w16,w16,lsl#2	// r4*5
	str	w14,[$ctx,#16*6]	// s3
	str	w16,[$ctx,#16*7]	// r4
	str	w15,[$ctx,#16*8]	// s4

	ret
.size	poly1305_splat,.-poly1305_splat

.type	poly1305_blocks_neon,%function
.align	5
poly1305_blocks_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cmp	$len,#128
	b.hs	.Lblocks_neon
	cbz	$is_base2_26,poly1305_blocks
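	// [inputs shorter than 128 bytes whose hash is still in base
	// 2^64 take the scalar path; everything else goes through the
	// NEON code and keeps the hash in base 2^26]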

.Lblocks_neon:
	stp	x29,x30,[sp,#-80]!
	add	x29,sp,#0

	ands	$len,$len,#-16
	b.eq	.Lno_data_neon

	cbz	$is_base2_26,.Lbase2_64_neon

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	tst	$len,#31
	b.eq	.Leven_neon

	ldp	$r0,$r1,[$ctx,#32]	// load key value

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$d2,$h2,xzr		// can be partially reduced...

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)

	and	$t0,$d2,#-4		// ... so reduce
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$h0,$t0
	adc	$h1,$h1,xzr

#ifdef	__ARMEB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult
	ldr	x30,[sp,#8]

	cbz	$padbit,.Lstore_base2_64_neon

	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	cbnz	$len,.Leven_neon

	stp	w10,w11,[$ctx]		// store hash value base 2^26
	stp	w12,w13,[$ctx,#8]
	str	w14,[$ctx,#16]
	b	.Lno_data_neon

.align	4
.Lstore_base2_64_neon:
	stp	$h0,$h1,[$ctx]		// store hash value base 2^64
	stp	$h2,xzr,[$ctx,#16]	// note that is_base2_26 is zeroed
	b	.Lno_data_neon

.align	4
.Lbase2_64_neon:
	ldp	$r0,$r1,[$ctx,#32]	// load key value

	ldp	$h0,$h1,[$ctx]		// load hash value base 2^64
	ldr	$h2,[$ctx,#16]

	tst	$len,#31
	b.eq	.Linit_neon

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
#ifdef	__ARMEB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult

.Linit_neon:
	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

	////////////////////////////////// initialize r^n table
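	// [each 16-byte table entry collects the same limb of r^1..r^4,
	// one power per 32-bit lane: poly1305_splat first writes lane 3
	// (r^1), and the sub-#4 steps below move to the next lower lane,
	// so lane 0 ends up holding r^4, lane 2 holding r^2]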
	mov	$h0,$r0			// r^1
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
	mov	$h1,$r1
	mov	$h2,xzr
	add	$ctx,$ctx,#48+12
	bl	poly1305_splat

	bl	poly1305_mult		// r^2
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^3
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^4
	sub	$ctx,$ctx,#4
	bl	poly1305_splat
	ldr	x30,[sp,#8]

	add	$in2,$inp,#32
	adr	$zeros,.Lzeros
	subs	$len,$len,#64
	csel	$in2,$zeros,$in2,lo

	mov	x4,#1
	str	x4,[$ctx,#-24]		// set is_base2_26
	sub	$ctx,$ctx,#48		// restore original $ctx
	b	.Ldo_neon

.align	4
.Leven_neon:
	add	$in2,$inp,#32
	adr	$zeros,.Lzeros
	subs	$len,$len,#64
	csel	$in2,$zeros,$in2,lo

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

.Ldo_neon:
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	ldp	x9,x13,[$in2],#48

	lsl	$padbit,$padbit,#24
	add	x15,$ctx,#48
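	// [two blocks travel together from here on: the base 2^26 limbs
	// of the first block of each pair go to lane 0 and those of the
	// second to lane 1, packed as low/high word pairs in general
	// registers and moved over with fmov]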

#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	fmov	$IN23_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	fmov	$IN23_1,x6
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	fmov	$IN23_2,x8
	fmov	$IN23_3,x10
	fmov	$IN23_4,x12

	ldp	x8,x12,[$inp],#16	// inp[0:1]
	ldp	x9,x13,[$inp],#48

	ld1	{$R0,$R1,$S1,$R2},[x15],#64
	ld1	{$S2,$R3,$S3,$R4},[x15],#64
	ld1	{$S4},[x15]

#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	fmov	$IN01_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	fmov	$IN01_1,x6
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	fmov	$IN01_2,x8
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12

	b.ls	.Lskip_loop

.align	4
.Loop_neon:
	////////////////////////////////////////////////////////////////
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	//   \___________________/
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	//   \___________________/ \____________________/
	//
	// Note that we start with inp[2:3]*r^2. This is because it
	// doesn't depend on reduction in previous iteration.
	////////////////////////////////////////////////////////////////
	// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
	// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
	// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
	// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
	// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1

	subs	$len,$len,#64
	umull	$ACC4,$IN23_0,${R4}[2]
	csel	$in2,$zeros,$in2,lo
	umull	$ACC3,$IN23_0,${R3}[2]
	umull	$ACC2,$IN23_0,${R2}[2]
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	umull	$ACC1,$IN23_0,${R1}[2]
	ldp	x9,x13,[$in2],#48
	umull	$ACC0,$IN23_0,${R0}[2]
#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	umlal	$ACC4,$IN23_1,${R3}[2]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC3,$IN23_1,${R2}[2]
	and	x5,x9,#0x03ffffff
	umlal	$ACC2,$IN23_1,${R1}[2]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN23_1,${R0}[2]
	ubfx	x7,x9,#26,#26
	umlal	$ACC0,$IN23_1,${S4}[2]
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32

	umlal	$ACC4,$IN23_2,${R2}[2]
	extr	x8,x12,x8,#52
	umlal	$ACC3,$IN23_2,${R1}[2]
	extr	x9,x13,x9,#52
	umlal	$ACC2,$IN23_2,${R0}[2]
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	umlal	$ACC1,$IN23_2,${S4}[2]
	fmov	$IN23_0,x4
	umlal	$ACC0,$IN23_2,${S3}[2]
	and	x8,x8,#0x03ffffff

	umlal	$ACC4,$IN23_3,${R1}[2]
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN23_3,${R0}[2]
	ubfx	x10,x12,#14,#26
	umlal	$ACC2,$IN23_3,${S4}[2]
	ubfx	x11,x13,#14,#26
	umlal	$ACC1,$IN23_3,${S3}[2]
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	umlal	$ACC0,$IN23_3,${S2}[2]
	fmov	$IN23_1,x6

	add	$IN01_2,$IN01_2,$H2
	add	x12,$padbit,x12,lsr#40
	umlal	$ACC4,$IN23_4,${R0}[2]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC3,$IN23_4,${S4}[2]
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	umlal	$ACC2,$IN23_4,${S3}[2]
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	umlal	$ACC1,$IN23_4,${S2}[2]
	fmov	$IN23_2,x8
	umlal	$ACC0,$IN23_4,${S1}[2]
	fmov	$IN23_3,x10

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4 and accumulate

	add	$IN01_0,$IN01_0,$H0
	fmov	$IN23_4,x12
	umlal	$ACC3,$IN01_2,${R1}[0]
	ldp	x8,x12,[$inp],#16	// inp[0:1]
	umlal	$ACC0,$IN01_2,${S3}[0]
	ldp	x9,x13,[$inp],#48
	umlal	$ACC4,$IN01_2,${R2}[0]
	umlal	$ACC1,$IN01_2,${S4}[0]
	umlal	$ACC2,$IN01_2,${R0}[0]
#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}[0]
	umlal	$ACC4,$IN01_0,${R4}[0]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC2,$IN01_0,${R2}[0]
	and	x5,x9,#0x03ffffff
	umlal	$ACC0,$IN01_0,${R0}[0]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN01_0,${R1}[0]
	ubfx	x7,x9,#26,#26

	add	$IN01_3,$IN01_3,$H3
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32
	umlal	$ACC3,$IN01_1,${R2}[0]
	extr	x8,x12,x8,#52
	umlal	$ACC4,$IN01_1,${R3}[0]
	extr	x9,x13,x9,#52
	umlal	$ACC0,$IN01_1,${S4}[0]
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	umlal	$ACC2,$IN01_1,${R1}[0]
	fmov	$IN01_0,x4
	umlal	$ACC1,$IN01_1,${R0}[0]
	and	x8,x8,#0x03ffffff

	add	$IN01_4,$IN01_4,$H4
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN01_3,${R0}[0]
	ubfx	x10,x12,#14,#26
	umlal	$ACC0,$IN01_3,${S2}[0]
	ubfx	x11,x13,#14,#26
	umlal	$ACC4,$IN01_3,${R1}[0]
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	umlal	$ACC1,$IN01_3,${S3}[0]
	fmov	$IN01_1,x6
	umlal	$ACC2,$IN01_3,${S4}[0]
	add	x12,$padbit,x12,lsr#40

	umlal	$ACC3,$IN01_4,${S4}[0]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC0,$IN01_4,${S1}[0]
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	umlal	$ACC4,$IN01_4,${R0}[0]
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	umlal	$ACC1,$IN01_4,${S2}[0]
	fmov	$IN01_2,x8
	umlal	$ACC2,$IN01_4,${S3}[0]
	fmov	$IN01_3,x10

	/////////////////////////////////////////////////////////////////
	// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	// and P. Schwabe

	ushr	$T0.2d,$ACC3,#26
	fmov	$IN01_4,x12
	xtn	$H3,$ACC3
	ushr	$T1.2d,$ACC0,#26
	xtn	$H0,$ACC0
	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	bic	$H3,#0xfc,lsl#24	// &=0x03ffffff
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1
	bic	$H0,#0xfc,lsl#24

	shrn	$T0.2s,$ACC4,#26
	xtn	$H4,$ACC4
	ushr	$T1.2d,$ACC1,#26
	xtn	$H1,$ACC1
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2
	bic	$H4,#0xfc,lsl#24
	bic	$H1,#0xfc,lsl#24

	add	$H0,$H0,$T0.2s
	shl	$T0.2s,$T0.2s,#2
	shrn	$T1.2s,$ACC2,#26
	xtn	$H2,$ACC2
	add	$H0,$H0,$T0.2s		// h4 -> h0
	add	$H3,$H3,$T1.2s		// h2 -> h3
	bic	$H2,#0xfc,lsl#24

	ushr	$T0.2s,$H0,#26
	bic	$H0,#0xfc,lsl#24
	ushr	$T1.2s,$H3,#26
	bic	$H3,#0xfc,lsl#24
	add	$H1,$H1,$T0.2s		// h0 -> h1
	add	$H4,$H4,$T1.2s		// h3 -> h4

	b.hi	.Loop_neon

.Lskip_loop:
	dup	$IN23_2,${IN23_2}[0]
	movi	$MASK.2d,#-1
	add	$IN01_2,$IN01_2,$H2
	ushr	$MASK.2d,$MASK.2d,#38

	////////////////////////////////////////////////////////////////
	// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	adds	$len,$len,#32
	b.ne	.Long_tail

	dup	$IN23_2,${IN01_2}[0]
	add	$IN23_0,$IN01_0,$H0
	add	$IN23_3,$IN01_3,$H3
	add	$IN23_1,$IN01_1,$H1
	add	$IN23_4,$IN01_4,$H4

.Long_tail:
	dup	$IN23_0,${IN23_0}[0]
	umull2	$ACC0,$IN23_2,${S3}
	umull2	$ACC3,$IN23_2,${R1}
	umull2	$ACC4,$IN23_2,${R2}
	umull2	$ACC2,$IN23_2,${R0}
	umull2	$ACC1,$IN23_2,${S4}

	dup	$IN23_1,${IN23_1}[0]
	umlal2	$ACC0,$IN23_0,${R0}
	umlal2	$ACC2,$IN23_0,${R2}
	umlal2	$ACC3,$IN23_0,${R3}
	umlal2	$ACC4,$IN23_0,${R4}
	umlal2	$ACC1,$IN23_0,${R1}

	dup	$IN23_3,${IN23_3}[0]
	umlal2	$ACC0,$IN23_1,${S4}
	umlal2	$ACC3,$IN23_1,${R2}
	umlal2	$ACC2,$IN23_1,${R1}
	umlal2	$ACC4,$IN23_1,${R3}
	umlal2	$ACC1,$IN23_1,${R0}

	dup	$IN23_4,${IN23_4}[0]
	umlal2	$ACC3,$IN23_3,${R0}
	umlal2	$ACC4,$IN23_3,${R1}
	umlal2	$ACC0,$IN23_3,${S2}
	umlal2	$ACC1,$IN23_3,${S3}
	umlal2	$ACC2,$IN23_3,${S4}

	umlal2	$ACC3,$IN23_4,${S4}
	umlal2	$ACC0,$IN23_4,${S1}
	umlal2	$ACC4,$IN23_4,${R0}
	umlal2	$ACC1,$IN23_4,${S2}
	umlal2	$ACC2,$IN23_4,${S3}

	b.eq	.Lshort_tail

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4:r^3 and accumulate

	add	$IN01_0,$IN01_0,$H0
	umlal	$ACC3,$IN01_2,${R1}
	umlal	$ACC0,$IN01_2,${S3}
	umlal	$ACC4,$IN01_2,${R2}
	umlal	$ACC1,$IN01_2,${S4}
	umlal	$ACC2,$IN01_2,${R0}

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}
	umlal	$ACC0,$IN01_0,${R0}
	umlal	$ACC4,$IN01_0,${R4}
	umlal	$ACC1,$IN01_0,${R1}
	umlal	$ACC2,$IN01_0,${R2}

	add	$IN01_3,$IN01_3,$H3
	umlal	$ACC3,$IN01_1,${R2}
	umlal	$ACC0,$IN01_1,${S4}
	umlal	$ACC4,$IN01_1,${R3}
	umlal	$ACC1,$IN01_1,${R0}
	umlal	$ACC2,$IN01_1,${R1}

	add	$IN01_4,$IN01_4,$H4
	umlal	$ACC3,$IN01_3,${R0}
	umlal	$ACC0,$IN01_3,${S2}
	umlal	$ACC4,$IN01_3,${R1}
	umlal	$ACC1,$IN01_3,${S3}
	umlal	$ACC2,$IN01_3,${S4}

	umlal	$ACC3,$IN01_4,${S4}
	umlal	$ACC0,$IN01_4,${S1}
	umlal	$ACC4,$IN01_4,${R0}
	umlal	$ACC1,$IN01_4,${S2}
	umlal	$ACC2,$IN01_4,${S3}

.Lshort_tail:
	////////////////////////////////////////////////////////////////
	// horizontal add

	addp	$ACC3,$ACC3,$ACC3
	ldp	d8,d9,[sp,#16]		// meet ABI requirements
	addp	$ACC0,$ACC0,$ACC0
	ldp	d10,d11,[sp,#32]
	addp	$ACC4,$ACC4,$ACC4
	ldp	d12,d13,[sp,#48]
	addp	$ACC1,$ACC1,$ACC1
	ldp	d14,d15,[sp,#64]
	addp	$ACC2,$ACC2,$ACC2
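	// [addp with equal source operands leaves the sum of both 64-bit
	// lanes in every lane, collapsing the even/odd block
	// accumulators; only lane 0 is stored below]
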
	////////////////////////////////////////////////////////////////
	// lazy reduction, but without narrowing

	ushr	$T0.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d

	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	and	$ACC4,$ACC4,$MASK.2d
	ushr	$T1.2d,$ACC1,#26
	and	$ACC1,$ACC1,$MASK.2d
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	ushr	$T1.2d,$ACC2,#26
	and	$ACC2,$ACC2,$MASK.2d
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	add	$ACC3,$ACC3,$T1.2d	// h2 -> h3

	ushr	$T0.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	ushr	$T1.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	add	$ACC1,$ACC1,$T0.2d	// h0 -> h1
	add	$ACC4,$ACC4,$T1.2d	// h3 -> h4

	////////////////////////////////////////////////////////////////
	// write the result, can be partially reduced

	st4	{$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
	st1	{$ACC4}[0],[$ctx]

.Lno_data_neon:
	ldr	x29,[sp],#80
	ret
.size	poly1305_blocks_neon,.-poly1305_blocks_neon

.type	poly1305_emit_neon,%function
.align	5
poly1305_emit_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cbz	$is_base2_26,poly1305_emit

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$h2,$h2,xzr		// can be partially reduced...

	ldp	$t0,$t1,[$nonce]	// load nonce

	and	$d0,$h2,#-4		// ... so reduce
	add	$d0,$d0,$h2,lsr#2
	and	$h2,$h2,#3
	adds	$h0,$h0,$d0
	adc	$h1,$h1,xzr

	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef	__ARMEB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef	__ARMEB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit_neon,.-poly1305_emit_neon

.align	5
.Lzeros:
.long	0,0,0,0,0,0,0,0
.LOPENSSL_armcap_P:
#ifdef	__ILP32__
.long	OPENSSL_armcap_P-.
#else
.quad	OPENSSL_armcap_P-.
#endif
.asciz	"Poly1305 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
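
# The pass below rewrites the generated code into the exact forms the
# assembler expects: shrn results narrow to .2s, fmov from general
# registers targets the dN alias, dup/eor/and and the umull/umlal
# families get their element sizes normalized, element stores use the
# bare .s form, and lane references drop the element count,
# e.g. .4s[2] -> .s[2].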
foreach (split("\n",$code)) {
	s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/			or
	s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/	or
	(m/\bdup\b/ and (s/\.[24]s/.2d/g or 1))			or
	(m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1))	or
	(m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1))		or
	(m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1))		or
	(m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));

	s/\.[124]([sd])\[/.$1\[/;

	print $_,"\n";
}
close STDOUT;