#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# This module implements Poly1305 hash for ARMv8.
#
# June 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone.
#
#		IALU/gcc-4.9	NEON
#
# Apple A7	1.86/+5%	0.72
# Cortex-A53	2.69/+58%	1.47
# Cortex-A57	2.70/+7%	1.14
# Denver	1.64/+50%	1.18(*)
# X-Gene	2.13/+68%	2.27
# Mongoose	1.77/+75%	1.12
# Kryo		2.70/+55%	1.13
#
# (*) the estimate based on resource availability is less than 1.0,
#     i.e. the measured result is worse than expected, presumably
#     because the binary translator is not almighty;

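# The script is driven the usual perlasm way (a sketch; the exact flavour
# string is whatever arm-xlate.pl understands for the target, e.g. a
# 64-bit Linux or iOS build):
#
#	perl poly1305-armv8.pl <flavour> <output file>
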
$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;

my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
my ($mac,$nonce)=($inp,$len);

my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));
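
# Context layout at $ctx as used below: the hash value lives at offsets
# 0..16 (h0..h2 in base 2^64, or five base-2^26 limbs), the is_base2_26
# flag at #24, the clamped key r0,r1 at #32, and the table of r^1..r^4
# powers used by the NEON code starts at #48.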

$code.=<<___;
#include "arm_arch.h"

.text

// forward "declarations" are required for Apple
.extern	OPENSSL_armcap_P
.globl	poly1305_blocks
.globl	poly1305_emit

.globl	poly1305_init
.type	poly1305_init,%function
.align	5
poly1305_init:
	cmp	$inp,xzr
	stp	xzr,xzr,[$ctx]		// zero hash value
	stp	xzr,xzr,[$ctx,#16]	// [along with is_base2_26]

	csel	x0,xzr,x0,eq
	b.eq	.Lno_key

#ifdef	__ILP32__
	ldrsw	$t1,.LOPENSSL_armcap_P
#else
	ldr	$t1,.LOPENSSL_armcap_P
#endif
	adr	$t0,.LOPENSSL_armcap_P

	ldp	$r0,$r1,[$inp]		// load key
	mov	$s1,#0xfffffffc0fffffff
	movk	$s1,#0x0fff,lsl#48
	ldr	w17,[$t0,$t1]
#ifdef	__ARMEB__
	rev	$r0,$r0			// flip bytes
	rev	$r1,$r1
#endif
	and	$r0,$r0,$s1		// &=0ffffffc0fffffff
	and	$s1,$s1,#-4
	and	$r1,$r1,$s1		// &=0ffffffc0ffffffc
	stp	$r0,$r1,[$ctx,#32]	// save key value

	tst	w17,#ARMV7_NEON

	adr	$d0,poly1305_blocks
	adr	$r0,poly1305_blocks_neon
	adr	$d1,poly1305_emit
	adr	$r1,poly1305_emit_neon

	csel	$d0,$d0,$r0,eq
	csel	$d1,$d1,$r1,eq

#ifdef	__ILP32__
	stp	w12,w13,[$len]
#else
	stp	$d0,$d1,[$len]
#endif

	mov	x0,#1
.Lno_key:
	ret
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,%function
.align	5
poly1305_blocks:
	ands	$len,$len,#-16
	b.eq	.Lno_data

	ldp	$h0,$h1,[$ctx]		// load hash value
	ldp	$r0,$r1,[$ctx,#32]	// load key value
	ldr	$h2,[$ctx,#16]
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
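	// s1 = 5*(r1/4): the low two bits of r1 are clamped to zero, and
	// since 2^130 = 5 (mod 2^130-5), h1*r1 at 2^128 folds into h1*s1
	// at 2^0, and h2*r1 at 2^192 into h2*s1 at 2^64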
	b	.Loop

.align	5
.Loop:
	ldp	$t0,$t1,[$inp],#16	// load input
	sub	$len,$len,#16
#ifdef	__ARMEB__
	rev	$t0,$t0
	rev	$t1,$t1
#endif
	adds	$h0,$h0,$t0		// accumulate input
	adcs	$h1,$h1,$t1

	mul	$d0,$h0,$r0		// h0*r0
	adc	$h2,$h2,$padbit
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

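	// d2 holds everything at 2^128 and above; the bits at or above
	// 2^130, i.e. d2>>2, fold back into the low limb as
	// 5*(d2>>2) = (d2&~3) + (d2>>2)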
	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adcs	$h1,$d1,xzr
	adc	$h2,$h2,xzr

	cbnz	$len,.Loop

	stp	$h0,$h1,[$ctx]		// store hash value
	str	$h2,[$ctx,#16]

.Lno_data:
	ret
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,%function
.align	5
poly1305_emit:
	ldp	$h0,$h1,[$ctx]		// load hash base 2^64
	ldr	$h2,[$ctx,#16]
	ldp	$t0,$t1,[$nonce]	// load nonce

	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef	__ARMEB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef	__ARMEB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit,.-poly1305_emit
___
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
my ($T0,$T1,$MASK) = map("v$_",(29..31));

my ($in2,$zeros)=("x16","x17");
my $is_base2_26 = $zeros; # borrow
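
# Roles of the vector registers above: $R0..$R4 hold limbs 0..4 of
# r^4..r^1 (one power per lane) and $S1..$S4 the corresponding limbs
# premultiplied by 5; $IN01_* and $IN23_* hold input blocks 0:1 and 2:3;
# $H0..$H4 carry the running hash in base 2^26; $ACC0..$ACC4 accumulate
# the 64-bit lane products.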

$code.=<<___;
.type	poly1305_mult,%function
.align	5
poly1305_mult:
	mul	$d0,$h0,$r0		// h0*r0
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adcs	$h1,$d1,xzr
	adc	$h2,$h2,xzr

	ret
.size	poly1305_mult,.-poly1305_mult

.type	poly1305_splat,%function
.align	5
poly1305_splat:
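	// split the 130-bit value in $h0..$h2 into five 26-bit limbs
	// (bits 0-25, 26-51, 52-77, 78-103, 104-129) and store them,
	// interleaved with their 5*limb counterparts, into the power table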
	and	x12,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x13,$h0,#26,#26
	extr	x14,$h1,$h0,#52
	and	x14,x14,#0x03ffffff
	ubfx	x15,$h1,#14,#26
	extr	x16,$h2,$h1,#40

	str	w12,[$ctx,#16*0]	// r0
	add	w12,w13,w13,lsl#2	// r1*5
	str	w13,[$ctx,#16*1]	// r1
	add	w13,w14,w14,lsl#2	// r2*5
	str	w12,[$ctx,#16*2]	// s1
	str	w14,[$ctx,#16*3]	// r2
	add	w14,w15,w15,lsl#2	// r3*5
	str	w13,[$ctx,#16*4]	// s2
	str	w15,[$ctx,#16*5]	// r3
	add	w15,w16,w16,lsl#2	// r4*5
	str	w14,[$ctx,#16*6]	// s3
	str	w16,[$ctx,#16*7]	// r4
	str	w15,[$ctx,#16*8]	// s4

	ret
.size	poly1305_splat,.-poly1305_splat

.type	poly1305_blocks_neon,%function
.align	5
poly1305_blocks_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cmp	$len,#128
	b.hs	.Lblocks_neon
	cbz	$is_base2_26,poly1305_blocks
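	// i.e. inputs shorter than 128 bytes take the scalar path unless
	// the hash is already in base 2^26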

.Lblocks_neon:
	stp	x29,x30,[sp,#-80]!
	add	x29,sp,#0

	ands	$len,$len,#-16
	b.eq	.Lno_data_neon

	cbz	$is_base2_26,.Lbase2_64_neon

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	tst	$len,#31
	b.eq	.Leven_neon

	ldp	$r0,$r1,[$ctx,#32]	// load key value

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$d2,$h2,xzr		// can be partially reduced...

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)

	and	$t0,$d2,#-4		// ... so reduce
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$h0,$t0
	adcs	$h1,$h1,xzr
	adc	$h2,$h2,xzr

#ifdef	__ARMEB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult
	ldr	x30,[sp,#8]

	cbz	$padbit,.Lstore_base2_64_neon

	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	cbnz	$len,.Leven_neon

	stp	w10,w11,[$ctx]		// store hash value base 2^26
	stp	w12,w13,[$ctx,#8]
	str	w14,[$ctx,#16]
	b	.Lno_data_neon

.align	4
.Lstore_base2_64_neon:
	stp	$h0,$h1,[$ctx]		// store hash value base 2^64
	stp	$h2,xzr,[$ctx,#16]	// note that is_base2_26 is zeroed
	b	.Lno_data_neon

.align	4
.Lbase2_64_neon:
	ldp	$r0,$r1,[$ctx,#32]	// load key value

	ldp	$h0,$h1,[$ctx]		// load hash value base 2^64
	ldr	$h2,[$ctx,#16]

	tst	$len,#31
	b.eq	.Linit_neon

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
#ifdef	__ARMEB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult

.Linit_neon:
	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

	////////////////////////////////// initialize r^n table
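	// each 16-byte table entry ends up holding one 26-bit limb of
	// r^4,r^3,r^2,r^1 (lane 0 = r^4), so lane [0] selects r^4 for the
	// hash+inp[0:1] halves and lane [2] selects r^2 for inp[2:3]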
	mov	$h0,$r0			// r^1
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
	mov	$h1,$r1
	mov	$h2,xzr
	add	$ctx,$ctx,#48+12
	bl	poly1305_splat

	bl	poly1305_mult		// r^2
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^3
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^4
	sub	$ctx,$ctx,#4
	bl	poly1305_splat
	ldr	x30,[sp,#8]

	add	$in2,$inp,#32
	adr	$zeros,.Lzeros
	subs	$len,$len,#64
	csel	$in2,$zeros,$in2,lo

	mov	x4,#1
	str	x4,[$ctx,#-24]		// set is_base2_26
	sub	$ctx,$ctx,#48		// restore original $ctx
	b	.Ldo_neon

.align	4
.Leven_neon:
	add	$in2,$inp,#32
	adr	$zeros,.Lzeros
	subs	$len,$len,#64
	csel	$in2,$zeros,$in2,lo

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

.Ldo_neon:
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	ldp	x9,x13,[$in2],#48

	lsl	$padbit,$padbit,#24
	add	x15,$ctx,#48

#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	fmov	$IN23_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	fmov	$IN23_1,x6
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	fmov	$IN23_2,x8
	fmov	$IN23_3,x10
	fmov	$IN23_4,x12

	ldp	x8,x12,[$inp],#16	// inp[0:1]
	ldp	x9,x13,[$inp],#48

	ld1	{$R0,$R1,$S1,$R2},[x15],#64
	ld1	{$S2,$R3,$S3,$R4},[x15],#64
	ld1	{$S4},[x15]

#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	fmov	$IN01_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	fmov	$IN01_1,x6
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	movi	$MASK.2d,#-1
	fmov	$IN01_2,x8
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12
	ushr	$MASK.2d,$MASK.2d,#38
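	// $MASK.2d now holds 2^26-1 in each lane (64 ones shifted right by 38)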

	b.ls	.Lskip_loop

.align	4
.Loop_neon:
	////////////////////////////////////////////////////////////////
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	//   \___________________/
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	//   \___________________/ \____________________/
	//
	// Note that we start with inp[2:3]*r^2. This is because it
	// doesn't depend on reduction in previous iteration.
	////////////////////////////////////////////////////////////////
	// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
	// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
	// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
	// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
	// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1

	subs	$len,$len,#64
	umull	$ACC4,$IN23_0,${R4}[2]
	csel	$in2,$zeros,$in2,lo
	umull	$ACC3,$IN23_0,${R3}[2]
	umull	$ACC2,$IN23_0,${R2}[2]
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	umull	$ACC1,$IN23_0,${R1}[2]
	ldp	x9,x13,[$in2],#48
	umull	$ACC0,$IN23_0,${R0}[2]
#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	umlal	$ACC4,$IN23_1,${R3}[2]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC3,$IN23_1,${R2}[2]
	and	x5,x9,#0x03ffffff
	umlal	$ACC2,$IN23_1,${R1}[2]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN23_1,${R0}[2]
	ubfx	x7,x9,#26,#26
	umlal	$ACC0,$IN23_1,${S4}[2]
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32

	umlal	$ACC4,$IN23_2,${R2}[2]
	extr	x8,x12,x8,#52
	umlal	$ACC3,$IN23_2,${R1}[2]
	extr	x9,x13,x9,#52
	umlal	$ACC2,$IN23_2,${R0}[2]
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	umlal	$ACC1,$IN23_2,${S4}[2]
	fmov	$IN23_0,x4
	umlal	$ACC0,$IN23_2,${S3}[2]
	and	x8,x8,#0x03ffffff

	umlal	$ACC4,$IN23_3,${R1}[2]
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN23_3,${R0}[2]
	ubfx	x10,x12,#14,#26
	umlal	$ACC2,$IN23_3,${S4}[2]
	ubfx	x11,x13,#14,#26
	umlal	$ACC1,$IN23_3,${S3}[2]
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	umlal	$ACC0,$IN23_3,${S2}[2]
	fmov	$IN23_1,x6

	add	$IN01_2,$IN01_2,$H2
	add	x12,$padbit,x12,lsr#40
	umlal	$ACC4,$IN23_4,${R0}[2]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC3,$IN23_4,${S4}[2]
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	umlal	$ACC2,$IN23_4,${S3}[2]
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	umlal	$ACC1,$IN23_4,${S2}[2]
	fmov	$IN23_2,x8
	umlal	$ACC0,$IN23_4,${S1}[2]
	fmov	$IN23_3,x10

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4 and accumulate

	add	$IN01_0,$IN01_0,$H0
	fmov	$IN23_4,x12
	umlal	$ACC3,$IN01_2,${R1}[0]
	ldp	x8,x12,[$inp],#16	// inp[0:1]
	umlal	$ACC0,$IN01_2,${S3}[0]
	ldp	x9,x13,[$inp],#48
	umlal	$ACC4,$IN01_2,${R2}[0]
	umlal	$ACC1,$IN01_2,${S4}[0]
	umlal	$ACC2,$IN01_2,${R0}[0]
#ifdef	__ARMEB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}[0]
	umlal	$ACC4,$IN01_0,${R4}[0]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC2,$IN01_0,${R2}[0]
	and	x5,x9,#0x03ffffff
	umlal	$ACC0,$IN01_0,${R0}[0]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN01_0,${R1}[0]
	ubfx	x7,x9,#26,#26

	add	$IN01_3,$IN01_3,$H3
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32
	umlal	$ACC3,$IN01_1,${R2}[0]
	extr	x8,x12,x8,#52
	umlal	$ACC4,$IN01_1,${R3}[0]
	extr	x9,x13,x9,#52
	umlal	$ACC0,$IN01_1,${S4}[0]
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	umlal	$ACC2,$IN01_1,${R1}[0]
	fmov	$IN01_0,x4
	umlal	$ACC1,$IN01_1,${R0}[0]
	and	x8,x8,#0x03ffffff

	add	$IN01_4,$IN01_4,$H4
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN01_3,${R0}[0]
	ubfx	x10,x12,#14,#26
	umlal	$ACC0,$IN01_3,${S2}[0]
	ubfx	x11,x13,#14,#26
	umlal	$ACC4,$IN01_3,${R1}[0]
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	umlal	$ACC1,$IN01_3,${S3}[0]
	fmov	$IN01_1,x6
	umlal	$ACC2,$IN01_3,${S4}[0]
	add	x12,$padbit,x12,lsr#40

	umlal	$ACC3,$IN01_4,${S4}[0]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC0,$IN01_4,${S1}[0]
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	umlal	$ACC4,$IN01_4,${R0}[0]
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	umlal	$ACC1,$IN01_4,${S2}[0]
	fmov	$IN01_2,x8
	umlal	$ACC2,$IN01_4,${S3}[0]
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12

	/////////////////////////////////////////////////////////////////
	// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	// and P. Schwabe
	//
	// [see discussion in poly1305-armv4 module]

	ushr	$T0.2d,$ACC3,#26
	xtn	$H3,$ACC3
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	bic	$H3,#0xfc,lsl#24	// &=0x03ffffff
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	xtn	$H4,$ACC4
	ushr	$T1.2d,$ACC1,#26
	xtn	$H1,$ACC1
	bic	$H4,#0xfc,lsl#24
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	shrn	$T1.2s,$ACC2,#26
	xtn	$H2,$ACC2
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	bic	$H1,#0xfc,lsl#24
	add	$H3,$H3,$T1.2s		// h2 -> h3
	bic	$H2,#0xfc,lsl#24

	shrn	$T0.2s,$ACC0,#26
	xtn	$H0,$ACC0
	ushr	$T1.2s,$H3,#26
	bic	$H3,#0xfc,lsl#24
	bic	$H0,#0xfc,lsl#24
	add	$H1,$H1,$T0.2s		// h0 -> h1
	add	$H4,$H4,$T1.2s		// h3 -> h4

	b.hi	.Loop_neon

.Lskip_loop:
	dup	$IN23_2,${IN23_2}[0]
	add	$IN01_2,$IN01_2,$H2

	////////////////////////////////////////////////////////////////
	// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	adds	$len,$len,#32
	b.ne	.Long_tail

	dup	$IN23_2,${IN01_2}[0]
	add	$IN23_0,$IN01_0,$H0
	add	$IN23_3,$IN01_3,$H3
	add	$IN23_1,$IN01_1,$H1
	add	$IN23_4,$IN01_4,$H4

.Long_tail:
	dup	$IN23_0,${IN23_0}[0]
	umull2	$ACC0,$IN23_2,${S3}
	umull2	$ACC3,$IN23_2,${R1}
	umull2	$ACC4,$IN23_2,${R2}
	umull2	$ACC2,$IN23_2,${R0}
	umull2	$ACC1,$IN23_2,${S4}

	dup	$IN23_1,${IN23_1}[0]
	umlal2	$ACC0,$IN23_0,${R0}
	umlal2	$ACC2,$IN23_0,${R2}
	umlal2	$ACC3,$IN23_0,${R3}
	umlal2	$ACC4,$IN23_0,${R4}
	umlal2	$ACC1,$IN23_0,${R1}

	dup	$IN23_3,${IN23_3}[0]
	umlal2	$ACC0,$IN23_1,${S4}
	umlal2	$ACC3,$IN23_1,${R2}
	umlal2	$ACC2,$IN23_1,${R1}
	umlal2	$ACC4,$IN23_1,${R3}
	umlal2	$ACC1,$IN23_1,${R0}

	dup	$IN23_4,${IN23_4}[0]
	umlal2	$ACC3,$IN23_3,${R0}
	umlal2	$ACC4,$IN23_3,${R1}
	umlal2	$ACC0,$IN23_3,${S2}
	umlal2	$ACC1,$IN23_3,${S3}
	umlal2	$ACC2,$IN23_3,${S4}

	umlal2	$ACC3,$IN23_4,${S4}
	umlal2	$ACC0,$IN23_4,${S1}
	umlal2	$ACC4,$IN23_4,${R0}
	umlal2	$ACC1,$IN23_4,${S2}
	umlal2	$ACC2,$IN23_4,${S3}

	b.eq	.Lshort_tail

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4:r^3 and accumulate

	add	$IN01_0,$IN01_0,$H0
	umlal	$ACC3,$IN01_2,${R1}
	umlal	$ACC0,$IN01_2,${S3}
	umlal	$ACC4,$IN01_2,${R2}
	umlal	$ACC1,$IN01_2,${S4}
	umlal	$ACC2,$IN01_2,${R0}

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}
	umlal	$ACC0,$IN01_0,${R0}
	umlal	$ACC4,$IN01_0,${R4}
	umlal	$ACC1,$IN01_0,${R1}
	umlal	$ACC2,$IN01_0,${R2}

	add	$IN01_3,$IN01_3,$H3
	umlal	$ACC3,$IN01_1,${R2}
	umlal	$ACC0,$IN01_1,${S4}
	umlal	$ACC4,$IN01_1,${R3}
	umlal	$ACC1,$IN01_1,${R0}
	umlal	$ACC2,$IN01_1,${R1}

	add	$IN01_4,$IN01_4,$H4
	umlal	$ACC3,$IN01_3,${R0}
	umlal	$ACC0,$IN01_3,${S2}
	umlal	$ACC4,$IN01_3,${R1}
	umlal	$ACC1,$IN01_3,${S3}
	umlal	$ACC2,$IN01_3,${S4}

	umlal	$ACC3,$IN01_4,${S4}
	umlal	$ACC0,$IN01_4,${S1}
	umlal	$ACC4,$IN01_4,${R0}
	umlal	$ACC1,$IN01_4,${S2}
	umlal	$ACC2,$IN01_4,${S3}

.Lshort_tail:
	////////////////////////////////////////////////////////////////
	// horizontal add

	addp	$ACC3,$ACC3,$ACC3
	ldp	d8,d9,[sp,#16]		// meet ABI requirements
	addp	$ACC0,$ACC0,$ACC0
	ldp	d10,d11,[sp,#32]
	addp	$ACC4,$ACC4,$ACC4
	ldp	d12,d13,[sp,#48]
	addp	$ACC1,$ACC1,$ACC1
	ldp	d14,d15,[sp,#64]
	addp	$ACC2,$ACC2,$ACC2

	////////////////////////////////////////////////////////////////
	// lazy reduction, but without narrowing

	ushr	$T0.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d

	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	and	$ACC4,$ACC4,$MASK.2d
	ushr	$T1.2d,$ACC1,#26
	and	$ACC1,$ACC1,$MASK.2d
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	ushr	$T1.2d,$ACC2,#26
	and	$ACC2,$ACC2,$MASK.2d
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	add	$ACC3,$ACC3,$T1.2d	// h2 -> h3

	ushr	$T0.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	ushr	$T1.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	add	$ACC1,$ACC1,$T0.2d	// h0 -> h1
	add	$ACC4,$ACC4,$T1.2d	// h3 -> h4

	////////////////////////////////////////////////////////////////
	// write the result, can be partially reduced

	st4	{$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
	st1	{$ACC4}[0],[$ctx]

.Lno_data_neon:
	ldr	x29,[sp],#80
	ret
.size	poly1305_blocks_neon,.-poly1305_blocks_neon

.type	poly1305_emit_neon,%function
.align	5
poly1305_emit_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cbz	$is_base2_26,poly1305_emit

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$h2,$h2,xzr		// can be partially reduced...

	ldp	$t0,$t1,[$nonce]	// load nonce

	and	$d0,$h2,#-4		// ... so reduce
	add	$d0,$d0,$h2,lsr#2
	and	$h2,$h2,#3
	adds	$h0,$h0,$d0
	adcs	$h1,$h1,xzr
	adc	$h2,$h2,xzr

	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef	__ARMEB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef	__ARMEB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit_neon,.-poly1305_emit_neon

.align	5
.Lzeros:
.long	0,0,0,0,0,0,0,0
.LOPENSSL_armcap_P:
#ifdef	__ILP32__
.long	OPENSSL_armcap_P-.
#else
.quad	OPENSSL_armcap_P-.
#endif
.asciz	"Poly1305 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

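# The substitutions below adjust the vector arrangement specifiers and
# lane syntax used in the uniform notation above to what the assembler
# expects (e.g. fmov from a general register targets the D form, and
# umull/umlal take .2s operands while their *2 variants keep .4s).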
foreach (split("\n",$code)) {
	s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/			or
	s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/	or
	(m/\bdup\b/ and (s/\.[24]s/.2d/g or 1))			or
	(m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1))	or
	(m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1))		or
	(m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1))		or
	(m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));

	s/\.[124]([sd])\[/.$1\[/;

	print $_,"\n";
}
close STDOUT;