#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for ARMv8.
#
# June 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone.
#
#               IALU/gcc-4.9    NEON
#
# Apple A7      1.86/+5%        0.72
# Cortex-A53    2.69/+58%       1.47
# Cortex-A57    2.70/+7%        1.14
# Denver        1.64/+50%       1.18(*)
# X-Gene        2.13/+68%       2.27
# Mongoose      1.77/+75%       1.12
# Kryo          2.70/+55%       1.13
# ThunderX2     1.17/+95%       1.36
#
# (*) estimate based on resource availability is less than 1.0,
#     i.e. measured result is worse than expected, presumably binary
#     translator is not almighty;
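#
# Poly1305 recap: the accumulator absorbs the input in 16-byte blocks
# as h = (h + block + 2^128*padbit) * r mod 2^130-5, where r is the
# clamped half of the key; poly1305_emit then adds the second key half
# (loaded as "nonce") to the final h modulo 2^128.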

$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
my ($mac,$nonce)=($inp,$len);

my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));

$code.=<<___;
#include "arm_arch.h"

.text

// forward "declarations" are required for Apple
.extern	OPENSSL_armcap_P
.globl	poly1305_blocks
.globl	poly1305_emit

.globl	poly1305_init
.type	poly1305_init,%function
.align	5
poly1305_init:
	cmp	$inp,xzr
	stp	xzr,xzr,[$ctx]		// zero hash value
	stp	xzr,xzr,[$ctx,#16]	// [along with is_base2_26]

	csel	x0,xzr,x0,eq
	b.eq	.Lno_key

	adrp	x17,OPENSSL_armcap_P
	ldr	w17,[x17,#:lo12:OPENSSL_armcap_P]

	ldp	$r0,$r1,[$inp]		// load key
	mov	$s1,#0xfffffffc0fffffff
	movk	$s1,#0x0fff,lsl#48
#ifdef	__ARMEB__
	rev	$r0,$r0			// flip bytes
	rev	$r1,$r1
#endif
	and	$r0,$r0,$s1		// &=0ffffffc0fffffff
	and	$s1,$s1,#-4
	and	$r1,$r1,$s1		// &=0ffffffc0ffffffc
	stp	$r0,$r1,[$ctx,#32]	// save key value
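
	// the "and" instructions above clamp the key per the Poly1305
	// spec, r &= 0x0ffffffc0ffffffc0ffffffc0fffffff; in particular
	// r1 ends up divisible by 4, which the s1 = r1 + (r1 >> 2)
	// folding in poly1305_blocks depends on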

	tst	w17,#ARMV7_NEON

	adr	$d0,.Lpoly1305_blocks
	adr	$r0,.Lpoly1305_blocks_neon
	adr	$d1,.Lpoly1305_emit
	adr	$r1,.Lpoly1305_emit_neon

	csel	$d0,$d0,$r0,eq
	csel	$d1,$d1,$r1,eq

#ifdef	__ILP32__
	stp	w12,w13,[$len]
#else
	stp	$d0,$d1,[$len]
#endif
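
	// the third argument to poly1305_init is a two-entry function
	// table: the scalar or NEON blocks/emit pair, selected on
	// ARMV7_NEON, is written out through $len; under ILP32 function
	// pointers are 32 bits wide, hence w12/w13, the low halves of
	// $d0/$d1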

	mov	x0,#1
.Lno_key:
	ret
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,%function
.align	5
poly1305_blocks:
.Lpoly1305_blocks:
	ands	$len,$len,#-16
	b.eq	.Lno_data

	ldp	$h0,$h1,[$ctx]		// load hash value
	ldp	$r0,$r1,[$ctx,#32]	// load key value
	ldr	$h2,[$ctx,#16]
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
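	// (2^130 = 5 mod 2^130-5 and clamping makes r1 divisible by 4,
	// so the h1*r1 term at weight 2^128 folds back to weight 2^0 as
	// h1*s1; hence the "h1*5*r1" comments below)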
	b	.Loop

.align	5
.Loop:
	ldp	$t0,$t1,[$inp],#16	// load input
	sub	$len,$len,#16
#ifdef	__ARMEB__
	rev	$t0,$t0
	rev	$t1,$t1
#endif
	adds	$h0,$h0,$t0		// accumulate input
	adcs	$h1,$h1,$t1

	mul	$d0,$h0,$r0		// h0*r0
	adc	$h2,$h2,$padbit
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1
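
	// final reduction: with h = d0 + d1*2^64 + d2*2^128, bit 2 and
	// up of d2 carries weight 2^130 = 5 mod 2^130-5, so the excess
	// (d2&-4) + (d2>>2) is folded back into the lowest limb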
	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adcs	$h1,$d1,xzr
	adc	$h2,$h2,xzr

	cbnz	$len,.Loop

	stp	$h0,$h1,[$ctx]		// store hash value
	str	$h2,[$ctx,#16]

.Lno_data:
	ret
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,%function
.align	5
poly1305_emit:
.Lpoly1305_emit:
	ldp	$h0,$h1,[$ctx]		// load hash base 2^64
	ldr	$h2,[$ctx,#16]
	ldp	$t0,$t1,[$nonce]	// load nonce

	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef	__ARMEB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef	__ARMEB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit,.-poly1305_emit
___
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
my ($T0,$T1,$MASK) = map("v$_",(29..31));

my ($in2,$zeros)=("x16","x17");
my $is_base2_26 = $zeros;		# borrow
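
# In the NEON code path the hash is kept in base 2^26, five 26-bit
# limbs h0..h4, with two interleaved block streams processed in
# parallel, one per vector lane; $is_base2_26 records which
# representation the context currently holds.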

$code.=<<___;
.type	poly1305_mult,%function
.align	5
poly1305_mult:
	mul	$d0,$h0,$r0		// h0*r0
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adcs	$h1,$d1,xzr
	adc	$h2,$h2,xzr

	ret
.size	poly1305_mult,.-poly1305_mult

.type	poly1305_splat,%function
.align	5
poly1305_splat:
	and	x12,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x13,$h0,#26,#26
	extr	x14,$h1,$h0,#52
	and	x14,x14,#0x03ffffff
	ubfx	x15,$h1,#14,#26
	extr	x16,$h2,$h1,#40

	str	w12,[$ctx,#16*0]	// r0
	add	w12,w13,w13,lsl#2	// r1*5
	str	w13,[$ctx,#16*1]	// r1
	add	w13,w14,w14,lsl#2	// r2*5
	str	w12,[$ctx,#16*2]	// s1
	str	w14,[$ctx,#16*3]	// r2
	add	w14,w15,w15,lsl#2	// r3*5
	str	w13,[$ctx,#16*4]	// s2
	str	w15,[$ctx,#16*5]	// r3
	add	w15,w16,w16,lsl#2	// r4*5
	str	w14,[$ctx,#16*6]	// s3
	str	w16,[$ctx,#16*7]	// r4
	str	w15,[$ctx,#16*8]	// s4

	ret
.size	poly1305_splat,.-poly1305_splat
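
// poly1305_splat stores each 26-bit limb at a 16-byte stride, and the
// caller steps $ctx back by 4 bytes between powers, so the powers
// interleave: each 4-lane vector later loaded from the table holds
// [r^4,r^3,r^2,r^1] of one limb, lane [0] being r^4 and lane [2] r^2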

.type	poly1305_blocks_neon,%function
.align	5
poly1305_blocks_neon:
.Lpoly1305_blocks_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cmp	$len,#128
	b.hs	.Lblocks_neon
	cbz	$is_base2_26,.Lpoly1305_blocks

.Lblocks_neon:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-80]!
	add	x29,sp,#0

	ands	$len,$len,#-16
	b.eq	.Lno_data_neon

	cbz	$is_base2_26,.Lbase2_64_neon

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	tst	$len,#31
	b.eq	.Leven_neon

	ldp	$r0,$r1,[$ctx,#32]	// load key value

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$d2,$h2,xzr		// can be partially reduced...

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)

	and	$t0,$d2,#-4		// ... so reduce
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$h0,$t0
	adcs	$h1,$h1,xzr
	adc	$h2,$h2,xzr

#ifdef	__ARMEB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult
	ldr	x30,[sp,#8]

	cbz	$padbit,.Lstore_base2_64_neon

	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	cbnz	$len,.Leven_neon

	stp	w10,w11,[$ctx]		// store hash value base 2^26
	stp	w12,w13,[$ctx,#8]
	str	w14,[$ctx,#16]
	b	.Lno_data_neon

.align	4
.Lstore_base2_64_neon:
	stp	$h0,$h1,[$ctx]		// store hash value base 2^64
	stp	$h2,xzr,[$ctx,#16]	// note that is_base2_26 is zeroed
	b	.Lno_data_neon

.align	4
.Lbase2_64_neon:
	ldp	$r0,$r1,[$ctx,#32]	// load key value

	ldp	$h0,$h1,[$ctx]		// load hash value base 2^64
	ldr	$h2,[$ctx,#16]

	tst	$len,#31
	b.eq	.Linit_neon

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
#ifdef	__ARMEB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult

.Linit_neon:
	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

	////////////////////////////////// initialize r^n table
	mov	$h0,$r0			// r^1
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
	mov	$h1,$r1
	mov	$h2,xzr
	add	$ctx,$ctx,#48+12
	bl	poly1305_splat

	bl	poly1305_mult		// r^2
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^3
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^4
	sub	$ctx,$ctx,#4
	bl	poly1305_splat
	ldr	x30,[sp,#8]

	add	$in2,$inp,#32
	adr	$zeros,.Lzeros
	subs	$len,$len,#64
	csel	$in2,$zeros,$in2,lo

	mov	x4,#1
	stur	x4,[$ctx,#-24]		// set is_base2_26
	sub	$ctx,$ctx,#48		// restore original $ctx
	b	.Ldo_neon

.align	4
.Leven_neon:
	add	$in2,$inp,#32
	adr	$zeros,.Lzeros
	subs	$len,$len,#64
	csel	$in2,$zeros,$in2,lo

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

.Ldo_neon:
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	ldp	x9,x13,[$in2],#48

	lsl	$padbit,$padbit,#24
	add	x15,$ctx,#48

#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	fmov	$IN23_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	fmov	$IN23_1,x6
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	fmov	$IN23_2,x8
	fmov	$IN23_3,x10
	fmov	$IN23_4,x12

	ldp	x8,x12,[$inp],#16	// inp[0:1]
	ldp	x9,x13,[$inp],#48

	ld1	{$R0,$R1,$S1,$R2},[x15],#64
	ld1	{$S2,$R3,$S3,$R4},[x15],#64
	ld1	{$S4},[x15]

#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	fmov	$IN01_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	fmov	$IN01_1,x6
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	movi	$MASK.2d,#-1
	fmov	$IN01_2,x8
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12
	ushr	$MASK.2d,$MASK.2d,#38

	b.ls	.Lskip_loop

.align	4
.Loop_neon:
	////////////////////////////////////////////////////////////////
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	//   \___________________/
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	//   \___________________/ \____________________/
	//
	// Note that we start with inp[2:3]*r^2. This is because it
	// doesn't depend on reduction in previous iteration.
	////////////////////////////////////////////////////////////////
	// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
	// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
	// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
	// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
	// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1

	subs	$len,$len,#64
	umull	$ACC4,$IN23_0,${R4}[2]
	csel	$in2,$zeros,$in2,lo
	umull	$ACC3,$IN23_0,${R3}[2]
	umull	$ACC2,$IN23_0,${R2}[2]
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	umull	$ACC1,$IN23_0,${R1}[2]
	ldp	x9,x13,[$in2],#48
	umull	$ACC0,$IN23_0,${R0}[2]
#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	umlal	$ACC4,$IN23_1,${R3}[2]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC3,$IN23_1,${R2}[2]
	and	x5,x9,#0x03ffffff
	umlal	$ACC2,$IN23_1,${R1}[2]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN23_1,${R0}[2]
	ubfx	x7,x9,#26,#26
	umlal	$ACC0,$IN23_1,${S4}[2]
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32

	umlal	$ACC4,$IN23_2,${R2}[2]
	extr	x8,x12,x8,#52
	umlal	$ACC3,$IN23_2,${R1}[2]
	extr	x9,x13,x9,#52
	umlal	$ACC2,$IN23_2,${R0}[2]
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	umlal	$ACC1,$IN23_2,${S4}[2]
	fmov	$IN23_0,x4
	umlal	$ACC0,$IN23_2,${S3}[2]
	and	x8,x8,#0x03ffffff

	umlal	$ACC4,$IN23_3,${R1}[2]
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN23_3,${R0}[2]
	ubfx	x10,x12,#14,#26
	umlal	$ACC2,$IN23_3,${S4}[2]
	ubfx	x11,x13,#14,#26
	umlal	$ACC1,$IN23_3,${S3}[2]
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	umlal	$ACC0,$IN23_3,${S2}[2]
	fmov	$IN23_1,x6

	add	$IN01_2,$IN01_2,$H2
	add	x12,$padbit,x12,lsr#40
	umlal	$ACC4,$IN23_4,${R0}[2]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC3,$IN23_4,${S4}[2]
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	umlal	$ACC2,$IN23_4,${S3}[2]
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	umlal	$ACC1,$IN23_4,${S2}[2]
	fmov	$IN23_2,x8
	umlal	$ACC0,$IN23_4,${S1}[2]
	fmov	$IN23_3,x10

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4 and accumulate

	add	$IN01_0,$IN01_0,$H0
	fmov	$IN23_4,x12
	umlal	$ACC3,$IN01_2,${R1}[0]
	ldp	x8,x12,[$inp],#16	// inp[0:1]
	umlal	$ACC0,$IN01_2,${S3}[0]
	ldp	x9,x13,[$inp],#48
	umlal	$ACC4,$IN01_2,${R2}[0]
	umlal	$ACC1,$IN01_2,${S4}[0]
	umlal	$ACC2,$IN01_2,${R0}[0]
#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}[0]
	umlal	$ACC4,$IN01_0,${R4}[0]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC2,$IN01_0,${R2}[0]
	and	x5,x9,#0x03ffffff
	umlal	$ACC0,$IN01_0,${R0}[0]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN01_0,${R1}[0]
	ubfx	x7,x9,#26,#26

	add	$IN01_3,$IN01_3,$H3
	add	x4,x4,x5,lsl#32		// bfi	x4,x5,#32,#32
	umlal	$ACC3,$IN01_1,${R2}[0]
	extr	x8,x12,x8,#52
	umlal	$ACC4,$IN01_1,${R3}[0]
	extr	x9,x13,x9,#52
	umlal	$ACC0,$IN01_1,${S4}[0]
	add	x6,x6,x7,lsl#32		// bfi	x6,x7,#32,#32
	umlal	$ACC2,$IN01_1,${R1}[0]
	fmov	$IN01_0,x4
	umlal	$ACC1,$IN01_1,${R0}[0]
	and	x8,x8,#0x03ffffff

	add	$IN01_4,$IN01_4,$H4
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN01_3,${R0}[0]
	ubfx	x10,x12,#14,#26
	umlal	$ACC0,$IN01_3,${S2}[0]
	ubfx	x11,x13,#14,#26
	umlal	$ACC4,$IN01_3,${R1}[0]
	add	x8,x8,x9,lsl#32		// bfi	x8,x9,#32,#32
	umlal	$ACC1,$IN01_3,${S3}[0]
	fmov	$IN01_1,x6
	umlal	$ACC2,$IN01_3,${S4}[0]
	add	x12,$padbit,x12,lsr#40

	umlal	$ACC3,$IN01_4,${S4}[0]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC0,$IN01_4,${S1}[0]
	add	x10,x10,x11,lsl#32	// bfi	x10,x11,#32,#32
	umlal	$ACC4,$IN01_4,${R0}[0]
	add	x12,x12,x13,lsl#32	// bfi	x12,x13,#32,#32
	umlal	$ACC1,$IN01_4,${S2}[0]
	fmov	$IN01_2,x8
	umlal	$ACC2,$IN01_4,${S3}[0]
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12

	/////////////////////////////////////////////////////////////////
	// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	// and P. Schwabe
	//
	// [see discussion in poly1305-armv4 module]
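	//
	// carries are propagated one limb at a time; the carry out of h4
	// wraps around to h0 multiplied by 5 (2^130 = 5 mod 2^130-5),
	// which the shl #2 plus two adds below implement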

	ushr	$T0.2d,$ACC3,#26
	xtn	$H3,$ACC3
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	bic	$H3,#0xfc,lsl#24	// &=0x03ffffff
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	xtn	$H4,$ACC4
	ushr	$T1.2d,$ACC1,#26
	xtn	$H1,$ACC1
	bic	$H4,#0xfc,lsl#24
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	shrn	$T1.2s,$ACC2,#26
	xtn	$H2,$ACC2
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	bic	$H1,#0xfc,lsl#24
	add	$H3,$H3,$T1.2s		// h2 -> h3
	bic	$H2,#0xfc,lsl#24

	shrn	$T0.2s,$ACC0,#26
	xtn	$H0,$ACC0
	ushr	$T1.2s,$H3,#26
	bic	$H3,#0xfc,lsl#24
	bic	$H0,#0xfc,lsl#24
	add	$H1,$H1,$T0.2s		// h0 -> h1
	add	$H4,$H4,$T1.2s		// h3 -> h4

	b.hi	.Loop_neon

.Lskip_loop:
	dup	$IN23_2,${IN23_2}[0]
	add	$IN01_2,$IN01_2,$H2

	////////////////////////////////////////////////////////////////
	// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	adds	$len,$len,#32
	b.ne	.Long_tail

	dup	$IN23_2,${IN01_2}[0]
	add	$IN23_0,$IN01_0,$H0
	add	$IN23_3,$IN01_3,$H3
	add	$IN23_1,$IN01_1,$H1
	add	$IN23_4,$IN01_4,$H4

.Long_tail:
	dup	$IN23_0,${IN23_0}[0]
	umull2	$ACC0,$IN23_2,${S3}
	umull2	$ACC3,$IN23_2,${R1}
	umull2	$ACC4,$IN23_2,${R2}
	umull2	$ACC2,$IN23_2,${R0}
	umull2	$ACC1,$IN23_2,${S4}

	dup	$IN23_1,${IN23_1}[0]
	umlal2	$ACC0,$IN23_0,${R0}
	umlal2	$ACC2,$IN23_0,${R2}
	umlal2	$ACC3,$IN23_0,${R3}
	umlal2	$ACC4,$IN23_0,${R4}
	umlal2	$ACC1,$IN23_0,${R1}

	dup	$IN23_3,${IN23_3}[0]
	umlal2	$ACC0,$IN23_1,${S4}
	umlal2	$ACC3,$IN23_1,${R2}
	umlal2	$ACC2,$IN23_1,${R1}
	umlal2	$ACC4,$IN23_1,${R3}
	umlal2	$ACC1,$IN23_1,${R0}

	dup	$IN23_4,${IN23_4}[0]
	umlal2	$ACC3,$IN23_3,${R0}
	umlal2	$ACC4,$IN23_3,${R1}
	umlal2	$ACC0,$IN23_3,${S2}
	umlal2	$ACC1,$IN23_3,${S3}
	umlal2	$ACC2,$IN23_3,${S4}

	umlal2	$ACC3,$IN23_4,${S4}
	umlal2	$ACC0,$IN23_4,${S1}
	umlal2	$ACC4,$IN23_4,${R0}
	umlal2	$ACC1,$IN23_4,${S2}
	umlal2	$ACC2,$IN23_4,${S3}

	b.eq	.Lshort_tail

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4:r^3 and accumulate

	add	$IN01_0,$IN01_0,$H0
	umlal	$ACC3,$IN01_2,${R1}
	umlal	$ACC0,$IN01_2,${S3}
	umlal	$ACC4,$IN01_2,${R2}
	umlal	$ACC1,$IN01_2,${S4}
	umlal	$ACC2,$IN01_2,${R0}

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}
	umlal	$ACC0,$IN01_0,${R0}
	umlal	$ACC4,$IN01_0,${R4}
	umlal	$ACC1,$IN01_0,${R1}
	umlal	$ACC2,$IN01_0,${R2}

	add	$IN01_3,$IN01_3,$H3
	umlal	$ACC3,$IN01_1,${R2}
	umlal	$ACC0,$IN01_1,${S4}
	umlal	$ACC4,$IN01_1,${R3}
	umlal	$ACC1,$IN01_1,${R0}
	umlal	$ACC2,$IN01_1,${R1}

	add	$IN01_4,$IN01_4,$H4
	umlal	$ACC3,$IN01_3,${R0}
	umlal	$ACC0,$IN01_3,${S2}
	umlal	$ACC4,$IN01_3,${R1}
	umlal	$ACC1,$IN01_3,${S3}
	umlal	$ACC2,$IN01_3,${S4}

	umlal	$ACC3,$IN01_4,${S4}
	umlal	$ACC0,$IN01_4,${S1}
	umlal	$ACC4,$IN01_4,${R0}
	umlal	$ACC1,$IN01_4,${S2}
	umlal	$ACC2,$IN01_4,${S3}

.Lshort_tail:
	////////////////////////////////////////////////////////////////
	// horizontal add

	addp	$ACC3,$ACC3,$ACC3
	ldp	d8,d9,[sp,#16]		// meet ABI requirements
	addp	$ACC0,$ACC0,$ACC0
	ldp	d10,d11,[sp,#32]
	addp	$ACC4,$ACC4,$ACC4
	ldp	d12,d13,[sp,#48]
	addp	$ACC1,$ACC1,$ACC1
	ldp	d14,d15,[sp,#64]
	addp	$ACC2,$ACC2,$ACC2
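
	// each addp sums the two 64-bit lanes of an accumulator, folding
	// the even and odd block streams into a single result in lane [0]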

	////////////////////////////////////////////////////////////////
	// lazy reduction, but without narrowing

	ushr	$T0.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d

	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	and	$ACC4,$ACC4,$MASK.2d
	ushr	$T1.2d,$ACC1,#26
	and	$ACC1,$ACC1,$MASK.2d
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	ushr	$T1.2d,$ACC2,#26
	and	$ACC2,$ACC2,$MASK.2d
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	add	$ACC3,$ACC3,$T1.2d	// h2 -> h3

	ushr	$T0.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	ushr	$T1.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	add	$ACC1,$ACC1,$T0.2d	// h0 -> h1
	add	$ACC4,$ACC4,$T1.2d	// h3 -> h4

	////////////////////////////////////////////////////////////////
	// write the result, can be partially reduced

	st4	{$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
	st1	{$ACC4}[0],[$ctx]

.Lno_data_neon:
	.inst	0xd50323bf		// autiasp
	ldr	x29,[sp],#80
	ret
.size	poly1305_blocks_neon,.-poly1305_blocks_neon

.type	poly1305_emit_neon,%function
.align	5
poly1305_emit_neon:
.Lpoly1305_emit_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cbz	$is_base2_26,poly1305_emit

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$h2,$h2,xzr		// can be partially reduced...

	ldp	$t0,$t1,[$nonce]	// load nonce

	and	$d0,$h2,#-4		// ... so reduce
	add	$d0,$d0,$h2,lsr#2
	and	$h2,$h2,#3
	adds	$h0,$h0,$d0
	adcs	$h1,$h1,xzr
	adc	$h2,$h2,xzr

	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef	__ARMEB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef	__ARMEB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit_neon,.-poly1305_emit_neon

.align	5
.Lzeros:
.long	0,0,0,0,0,0,0,0
.asciz	"Poly1305 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
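
# The substitutions below map the generic vector syntax used in $code
# onto legal arrangement specifiers: umull/umlal operands narrow to
# .2s and their umull2/umlal2 counterparts widen to .4s, fmov from a
# general register targets the d-form, and lane references such as
# ".4s[2]" are reduced to the ".s[2]" form the assembler expects.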
foreach (split("\n",$code)) {
	s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/			or
	s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/	or
	(m/\bdup\b/ and (s/\.[24]s/.2d/g or 1))			or
	(m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1))	or
	(m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1))		or
	(m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1))		or
	(m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));

	s/\.[124]([sd])\[/.$1\[/;

	print $_,"\n";
}
close STDOUT;