# Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# ECP_NISTZ256 module for ARMv4.
#
# The original ECP_NISTZ256 submission targeting x86_64 is detailed in
# http://eprint.iacr.org/2013/816. In the process of adaptation the
# original .c module was made 32-bit savvy in order to make this
# implementation possible.
#
#			with/without -DECP_NISTZ256_ASM
# Cortex-A15		+100-316%
# Snapdragon S4		+66-187%
#
# Ranges denote minimum and maximum improvement coefficients depending
# on the benchmark. Lower coefficients are for ECDSA sign, a server-side
# operation. Keep in mind that +200% means a 3x improvement.
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour \"$output\""
        or die "can't call $xlate: $!";
} else {
    $output and open STDOUT,">$output";
}
#if defined(__thumb2__)
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
open TABLE,"<ecp_nistz256_table.c"		or
open TABLE,"<${dir}../ecp_nistz256_table.c"	or
die "failed to open ecp_nistz256_table.c:",$!;
while (<TABLE>) {
	s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
}
# See ecp_nistz256_table.c for explanation for why it's 64*16*37.
# 64*16*37-1 is because $#arr returns the last valid index of @arr, not
# the number of elements.
die "insane number of elements" if ($#arr != 64*16*37-1);
.globl	ecp_nistz256_precomputed
.type	ecp_nistz256_precomputed,%object
ecp_nistz256_precomputed:
########################################################################
# This conversion smashes P256_POINT_AFFINE by individual bytes with
# a 64-byte interval, similar to
#	1111222233334444
#	1234123412341234
for(1..37) {
	@tbl = splice(@arr,0,64*16);
	for($i=0;$i<64;$i++) {
		undef @line;
		for($j=0;$j<64;$j++) {
			push @line,($tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
		}
		$code.=".byte\t".join(',',map { sprintf "0x%02x",$_} @line)."\n";
	}
}
.size	ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
.LRR:	@ 2^512 mod P precomputed for NIST P256 polynomial
.long	0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
.long	0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
.Lone:
.long	1,0,0,0,0,0,0,0
.asciz	"ECP_NISTZ256 for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
########################################################################
# Common register layout. Note that $t2 is the link register, so if an
# internal subroutine uses $t2, it has to offload lr...

($r_ptr,$a_ptr,$b_ptr,$ff,$a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7,$t1,$t2)=
		map("r$_",(0..12,14));
($t0,$t3)=($ff,$a_ptr);
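# ($t0,$t3) alias ($ff,$a_ptr): once the input pointer and the
# synthesized-borrow value have been consumed, the subroutines below
# reuse those registers as scratch (cf. the $t4/@acc[0] alias further
# down in __ecp_nistz256_mul_mont).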
@ void	ecp_nistz256_to_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_to_mont
.type	ecp_nistz256_to_mont,%function
ecp_nistz256_to_mont:
	b	.Lecp_nistz256_mul_mont
.size	ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
@ void	ecp_nistz256_from_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_from_mont
.type	ecp_nistz256_from_mont,%function
ecp_nistz256_from_mont:
	b	.Lecp_nistz256_mul_mont
.size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
@ void	ecp_nistz256_mul_by_2(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_mul_by_2
.type	ecp_nistz256_mul_by_2,%function
ecp_nistz256_mul_by_2:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_mul_by_2
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
.type	__ecp_nistz256_mul_by_2,%function
__ecp_nistz256_mul_by_2:
	adds	$a0,$a0,$a0		@ a[0:7]+=a[0:7], i.e. add with itself
.size	__ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
@ void	ecp_nistz256_add(BN_ULONG r0[8],const BN_ULONG r1[8],
@					const BN_ULONG r2[8]);
.globl	ecp_nistz256_add
.type	ecp_nistz256_add,%function
ecp_nistz256_add:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_add
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_add,.-ecp_nistz256_add
.type	__ecp_nistz256_add,%function
__ecp_nistz256_add:
	str	lr,[sp,#-4]!		@ push lr
	ldr	lr,[sp],#4		@ pop lr

	@ if a+b >= modulus, subtract modulus.
	@
	@ But since comparison implies subtraction, we subtract
	@ modulus and then add it back if the subtraction borrowed.

	@ Note that because the modulus has a special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ using the value of the borrow as a whole or extracting a single bit.
	@ Follow the $ff register...
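	@ (worked example) the modulus words, least significant first, are
	@	ffffffff ffffffff ffffffff 00000000
	@	00000000 00000000 00000001 ffffffff
	@ so with $ff = borrow ? 0xffffffff : 0, words 0-2 and 7 are just
	@ $ff, words 3-5 are 0, and word 6 is $ff,lsr#31.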
	adds	$a0,$a0,$ff		@ add synthesized modulus
	adcs	$a6,$a6,$ff,lsr#31
.size	__ecp_nistz256_add,.-__ecp_nistz256_add
@ void	ecp_nistz256_mul_by_3(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_mul_by_3
.type	ecp_nistz256_mul_by_3,%function
ecp_nistz256_mul_by_3:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_mul_by_3
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
.type	__ecp_nistz256_mul_by_3,%function
__ecp_nistz256_mul_by_3:
	str	lr,[sp,#-4]!		@ push lr

	@ As multiplication by 3 is performed as 2*n+n, below are inline
	@ copies of __ecp_nistz256_mul_by_2 and __ecp_nistz256_add; see the
	@ corresponding subroutines for details.

	adds	$a0,$a0,$a0		@ a[0:7]+=a[0:7]
	subs	$a0,$a0,#-1		@ .Lreduce_by_sub but without stores
	adds	$a0,$a0,$ff		@ add synthesized modulus
	ldr	$b_ptr,[$a_ptr,#0]
	adcs	$a6,$a6,$ff,lsr#31
	adds	$a0,$a0,$b_ptr		@ 2*a[0:7]+=a[0:7]
	ldr	$b_ptr,[$a_ptr,#16]
	ldr	lr,[sp],#4		@ pop lr
.size	__ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
@ void	ecp_nistz256_div_by_2(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_div_by_2
.type	ecp_nistz256_div_by_2,%function
ecp_nistz256_div_by_2:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_div_by_2
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
.type	__ecp_nistz256_div_by_2,%function
__ecp_nistz256_div_by_2:
	@ ret = (a is odd ? a+mod : a) >> 1

	mov	$ff,$a0,lsl#31		@ place least significant bit in most
					@ significant position; now arithmetic
					@ right shift by 31 will produce -1 or
					@ 0, while logical right shift 1 or 0,
					@ which is how the modulus is
					@ conditionally synthesized in this case...
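	@ (worked example) if a[0] is odd, $a0,lsl#31 = 0x80000000, so
	@ $ff,asr#31 = 0xffffffff and $ff,lsr#31 = 1, i.e. exactly the
	@ modulus words get added; if a[0] is even, both shifts give 0
	@ and a passes through unchanged.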
	adds	$a0,$a0,$ff,asr#31
	adcs	$a1,$a1,$ff,asr#31
	adcs	$a2,$a2,$ff,asr#31
	mov	$a0,$a0,lsr#1		@ a[0:7]>>=1, we can start early
					@ because it doesn't affect flags
	orr	$a0,$a0,$a1,lsl#31
	adcs	$a6,$a6,$ff,lsr#31
	adcs	$a7,$a7,$ff,asr#31
	adc	$b_ptr,$b_ptr,#0	@ top-most carry bit from addition
	orr	$a1,$a1,$a2,lsl#31
	orr	$a2,$a2,$a3,lsl#31
	orr	$a3,$a3,$a4,lsl#31
	orr	$a4,$a4,$a5,lsl#31
	orr	$a5,$a5,$a6,lsl#31
	orr	$a6,$a6,$a7,lsl#31
	orr	$a7,$a7,$b_ptr,lsl#31	@ don't forget the top-most carry bit
.size	__ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
@ void	ecp_nistz256_sub(BN_ULONG r0[8],const BN_ULONG r1[8],
@					const BN_ULONG r2[8]);
.globl	ecp_nistz256_sub
.type	ecp_nistz256_sub,%function
ecp_nistz256_sub:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_sub
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_sub,.-ecp_nistz256_sub
.type	__ecp_nistz256_sub,%function
__ecp_nistz256_sub:
	str	lr,[sp,#-4]!		@ push lr
	sbc	$ff,$ff,$ff		@ broadcast borrow bit
	ldr	lr,[sp],#4		@ pop lr

	@ if a-b borrows, add modulus.
	@
	@ Note that because the modulus has a special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ broadcasting the borrow bit to a register, $ff, and using it as
	@ a whole or extracting a single bit.

	adds	$a0,$a0,$ff		@ add synthesized modulus
	adcs	$a6,$a6,$ff,lsr#31
.size	__ecp_nistz256_sub,.-__ecp_nistz256_sub
@ void	ecp_nistz256_neg(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_neg
.type	ecp_nistz256_neg,%function
ecp_nistz256_neg:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_neg
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_neg,.-ecp_nistz256_neg

.type	__ecp_nistz256_neg,%function
__ecp_nistz256_neg:
.size	__ecp_nistz256_neg,.-__ecp_nistz256_neg
my @acc=map("r$_",(3..11));
my ($t0,$t1,$bj,$t2,$t3)=map("r$_",(0,1,2,12,14));
@ void	ecp_nistz256_sqr_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl	ecp_nistz256_sqr_mont
.type	ecp_nistz256_sqr_mont,%function
ecp_nistz256_sqr_mont:
	b	.Lecp_nistz256_mul_mont
.size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
@ void	ecp_nistz256_mul_mont(BN_ULONG r0[8],const BN_ULONG r1[8],
@					const BN_ULONG r2[8]);
.globl	ecp_nistz256_mul_mont
.type	ecp_nistz256_mul_mont,%function
ecp_nistz256_mul_mont:
.Lecp_nistz256_mul_mont:
	stmdb	sp!,{r4-r12,lr}
	bl	__ecp_nistz256_mul_mont
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
.type	__ecp_nistz256_mul_mont,%function
__ecp_nistz256_mul_mont:
	stmdb	sp!,{r0-r2,lr}			@ make a copy of arguments too

	ldr	$bj,[$b_ptr,#0]			@ b[0]
	ldmia	$a_ptr,{@acc[1]-@acc[8]}

	umull	@acc[0],$t3,@acc[1],$bj		@ r[0]=a[0]*b[0]
	stmdb	sp!,{@acc[1]-@acc[8]}		@ copy a[0-7] to stack, so
						@ that it can be addressed
						@ without spending a register
	umull	@acc[1],$t0,@acc[2],$bj		@ r[1]=a[1]*b[0]
	umull	@acc[2],$t1,@acc[3],$bj
	adds	@acc[1],@acc[1],$t3		@ accumulate high part of mult
	umull	@acc[3],$t2,@acc[4],$bj
	adcs	@acc[2],@acc[2],$t0
	umull	@acc[4],$t3,@acc[5],$bj
	adcs	@acc[3],@acc[3],$t1
	umull	@acc[5],$t0,@acc[6],$bj
	adcs	@acc[4],@acc[4],$t2
	umull	@acc[6],$t1,@acc[7],$bj
	adcs	@acc[5],@acc[5],$t3
	umull	@acc[7],$t2,@acc[8],$bj
	adcs	@acc[6],@acc[6],$t0
	adcs	@acc[7],@acc[7],$t1
	eor	$t3,$t3,$t3			@ first overflow bit is zero
for(my $i=1;$i<8;$i++) {
# Reduction iteration is normally performed by accumulating
# result of multiplication of modulus by "magic" digit [and
# omitting least significant word, which is guaranteed to
# be 0], but thanks to special form of modulus and "magic"
# digit being equal to least significant word, it can be
# performed with additions and subtractions alone. Indeed:
#
#        ffff.0001.0000.0000.0000.ffff.ffff.ffff
# *                                         abcd
# + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
#
# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
# rewrite above as:
#
#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
# + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
# - abcd.0000.0000.0000.0000.0000.0000.abcd
#
# or marking redundant operations:
#
#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
# + abcd.0000.abcd.0000.0000.abcd.----.----.----
# - abcd.----.----.----.----.----.----.----
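#
# In terms of the code below: with m = r[0], adding m*modulus amounts
# to r[3]+=m (the 2^96 term), r[6]+=m (2^192), r[8]+=m (2^256) and
# r[7]-=m (2^224), while the implied -m cancels r[0] itself, which is
# why the least significant word is guaranteed to be 0 and is simply
# shifted out.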
	@ multiplication-less reduction $i
	adds	@acc[3],@acc[3],@acc[0]		@ r[3]+=r[0]
	ldr	$bj,[sp,#40]			@ restore b_ptr
	adcs	@acc[4],@acc[4],#0		@ r[4]+=0
	adcs	@acc[5],@acc[5],#0		@ r[5]+=0
	adcs	@acc[6],@acc[6],@acc[0]		@ r[6]+=r[0]
	ldr	$t1,[sp,#0]			@ load a[0]
	adcs	@acc[7],@acc[7],#0		@ r[7]+=0
	ldr	$bj,[$bj,#4*$i]			@ load b[i]
	adcs	@acc[8],@acc[8],@acc[0]		@ r[8]+=r[0]
	adc	$t3,$t3,#0			@ overflow bit
	subs	@acc[7],@acc[7],@acc[0]		@ r[7]-=r[0]
	ldr	$t2,[sp,#4]			@ a[1]
	sbcs	@acc[8],@acc[8],#0		@ r[8]-=0
	umlal	@acc[1],$t0,$t1,$bj		@ "r[0]"+=a[0]*b[i]
	sbc	@acc[0],$t3,#0			@ overflow bit, keep in mind
						@ that the net result is
						@ addition of a value which
						@ makes underflow impossible
	ldr	$t3,[sp,#8]			@ a[2]
	umlal	@acc[2],$t1,$t2,$bj		@ "r[1]"+=a[1]*b[i]
	str	@acc[0],[sp,#36]		@ temporarily offload overflow
	ldr	$t4,[sp,#12]			@ a[3], $t4 is alias @acc[0]
	umlal	@acc[3],$t2,$t3,$bj		@ "r[2]"+=a[2]*b[i]
	adds	@acc[2],@acc[2],$t0		@ accumulate high part of mult
	ldr	$t0,[sp,#16]			@ a[4]
	umlal	@acc[4],$t3,$t4,$bj		@ "r[3]"+=a[3]*b[i]
	adcs	@acc[3],@acc[3],$t1
	ldr	$t1,[sp,#20]			@ a[5]
	umlal	@acc[5],$t4,$t0,$bj		@ "r[4]"+=a[4]*b[i]
	adcs	@acc[4],@acc[4],$t2
	ldr	$t2,[sp,#24]			@ a[6]
	umlal	@acc[6],$t0,$t1,$bj		@ "r[5]"+=a[5]*b[i]
	adcs	@acc[5],@acc[5],$t3
	ldr	$t3,[sp,#28]			@ a[7]
	umlal	@acc[7],$t1,$t2,$bj		@ "r[6]"+=a[6]*b[i]
	adcs	@acc[6],@acc[6],$t4
	ldr	@acc[0],[sp,#36]		@ restore overflow bit
	umlal	@acc[8],$t2,$t3,$bj		@ "r[7]"+=a[7]*b[i]
	adcs	@acc[7],@acc[7],$t0
	adcs	@acc[8],@acc[8],$t1
	adcs	@acc[0],@acc[0],$t2
	adc	$t3,$t3,#0			@ new overflow bit
	push(@acc,shift(@acc));			# rotate registers, so that
						# "r[i]" becomes r[i]
}
	@ last multiplication-less reduction
	adds	@acc[3],@acc[3],@acc[0]
	ldr	$r_ptr,[sp,#32]			@ restore r_ptr
	adcs	@acc[4],@acc[4],#0
	adcs	@acc[5],@acc[5],#0
	adcs	@acc[6],@acc[6],@acc[0]
	adcs	@acc[7],@acc[7],#0
	adcs	@acc[8],@acc[8],@acc[0]
	subs	@acc[7],@acc[7],@acc[0]
	sbcs	@acc[8],@acc[8],#0
	sbc	@acc[0],$t3,#0			@ overflow bit
	@ Final step is "if result > mod, subtract mod", but we do it
	@ "other way around", namely subtract modulus from result
	@ and if it borrowed, add modulus back.
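	@ (note) subtracting the modulus word 0xffffffff is implemented
	@ below as adding 1: x - 0xffffffff = x + 1 - 2^32, and adds/adcs
	@ set the very carry flag that subs/sbcs with #-1 would, so the
	@ two flavours can be mixed freely in one carry chain.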
	adds	@acc[1],@acc[1],#1		@ subs	@acc[1],@acc[1],#-1
	adcs	@acc[2],@acc[2],#0		@ sbcs	@acc[2],@acc[2],#-1
	adcs	@acc[3],@acc[3],#0		@ sbcs	@acc[3],@acc[3],#-1
	sbcs	@acc[4],@acc[4],#0
	sbcs	@acc[5],@acc[5],#0
	sbcs	@acc[6],@acc[6],#0
	sbcs	@acc[7],@acc[7],#1
	adcs	@acc[8],@acc[8],#0		@ sbcs	@acc[8],@acc[8],#-1
	ldr	lr,[sp,#44]			@ restore lr
	sbc	@acc[0],@acc[0],#0		@ broadcast borrow bit
	@ Note that because the modulus has a special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ broadcasting the borrow bit to a register, @acc[0], and using it
	@ as a whole or extracting a single bit.

	adds	@acc[1],@acc[1],@acc[0]		@ add modulus or zero
	adcs	@acc[2],@acc[2],@acc[0]
	str	@acc[1],[$r_ptr,#0]
	adcs	@acc[3],@acc[3],@acc[0]
	str	@acc[2],[$r_ptr,#4]
	adcs	@acc[4],@acc[4],#0
	str	@acc[3],[$r_ptr,#8]
	adcs	@acc[5],@acc[5],#0
	str	@acc[4],[$r_ptr,#12]
	adcs	@acc[6],@acc[6],#0
	str	@acc[5],[$r_ptr,#16]
	adcs	@acc[7],@acc[7],@acc[0],lsr#31
	str	@acc[6],[$r_ptr,#20]
	adc	@acc[8],@acc[8],@acc[0]
	str	@acc[7],[$r_ptr,#24]
	str	@acc[8],[$r_ptr,#28]
.size	__ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
my ($out,$inp,$index,$mask)=map("r$_",(0..3));
@ void	ecp_nistz256_scatter_w5(void *r0,const P256_POINT *r1,
@					int r2);
.globl	ecp_nistz256_scatter_w5
.type	ecp_nistz256_scatter_w5,%function
ecp_nistz256_scatter_w5:
	add	$out,$out,$index,lsl#2

	ldmia	$inp!,{r4-r11}		@ X
	str	r4,[$out,#64*0-4]
	str	r5,[$out,#64*1-4]
	str	r6,[$out,#64*2-4]
	str	r7,[$out,#64*3-4]
	str	r8,[$out,#64*4-4]
	str	r9,[$out,#64*5-4]
	str	r10,[$out,#64*6-4]
	str	r11,[$out,#64*7-4]

	ldmia	$inp!,{r4-r11}		@ Y
	str	r4,[$out,#64*0-4]
	str	r5,[$out,#64*1-4]
	str	r6,[$out,#64*2-4]
	str	r7,[$out,#64*3-4]
	str	r8,[$out,#64*4-4]
	str	r9,[$out,#64*5-4]
	str	r10,[$out,#64*6-4]
	str	r11,[$out,#64*7-4]

	ldmia	$inp,{r4-r11}		@ Z
	str	r4,[$out,#64*0-4]
	str	r5,[$out,#64*1-4]
	str	r6,[$out,#64*2-4]
	str	r7,[$out,#64*3-4]
	str	r8,[$out,#64*4-4]
	str	r9,[$out,#64*5-4]
	str	r10,[$out,#64*6-4]
	str	r11,[$out,#64*7-4]

#if __ARM_ARCH__>=5 || defined(__thumb__)
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
@ void	ecp_nistz256_gather_w5(P256_POINT *r0,const void *r1,
@					int r2);
.globl	ecp_nistz256_gather_w5
.type	ecp_nistz256_gather_w5,%function
ecp_nistz256_gather_w5:
	subne	$index,$index,#1
	add	$inp,$inp,$index,lsl#2

	stmia	$out!,{r4-r11}		@ X
	stmia	$out!,{r4-r11}		@ Y
	stmia	$out,{r4-r11}		@ Z

#if __ARM_ARCH__>=5 || defined(__thumb__)
.size	ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
@ void	ecp_nistz256_scatter_w7(void *r0,const P256_POINT_AFFINE *r1,
@					int r2);
.globl	ecp_nistz256_scatter_w7
.type	ecp_nistz256_scatter_w7,%function
ecp_nistz256_scatter_w7:
	subs	$index,$index,#1
	strb	$mask,[$out,#64*0]
	mov	$mask,$mask,lsr#8
	strb	$mask,[$out,#64*1]
	mov	$mask,$mask,lsr#8
	strb	$mask,[$out,#64*2]
	mov	$mask,$mask,lsr#8
	strb	$mask,[$out,#64*3]

#if __ARM_ARCH__>=5 || defined(__thumb__)
.size	ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
@ void	ecp_nistz256_gather_w7(P256_POINT_AFFINE *r0,const void *r1,
@					int r2);
.globl	ecp_nistz256_gather_w7
.type	ecp_nistz256_gather_w7,%function
ecp_nistz256_gather_w7:
	subne	$index,$index,#1
	subs	$index,$index,#1

#if __ARM_ARCH__>=5 || defined(__thumb__)
.size	ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
# In comparison to the integer-only equivalent of the subroutine below:
#
# As not all time is spent in multiplication, the overall impact is
# deemed too low to care about.
my ($A0,$A1,$A2,$A3,$Bi,$zero,$temp,$mask)=map("d$_",(0..7));
my @AxB=map("q$_",(8..15));
my ($rptr,$aptr,$bptr,$toutptr)=map("r$_",(0..3));
.globl	ecp_nistz256_mul_mont_neon
.type	ecp_nistz256_mul_mont_neon,%function
ecp_nistz256_mul_mont_neon:
	vstmdb	sp!,{q4-q5}		@ ABI specification says so

	vld1.32	{${Bi}[0]},[$bptr,:32]!
	veor	$zero,$zero,$zero
	vld1.32	{$A0-$A3}, [$aptr]	@ can't specify :32 :-(
	mov	sp,$toutptr		@ alloca
	vmov.i64	$mask,#0xffff

	vmull.u32	@AxB[0],$Bi,${A0}[0]
	vmull.u32	@AxB[1],$Bi,${A0}[1]
	vmull.u32	@AxB[2],$Bi,${A1}[0]
	vmull.u32	@AxB[3],$Bi,${A1}[1]
	vshr.u64	$temp,@AxB[0]#lo,#16
	vmull.u32	@AxB[4],$Bi,${A2}[0]
	vadd.u64	@AxB[0]#hi,@AxB[0]#hi,$temp
	vmull.u32	@AxB[5],$Bi,${A2}[1]
	vshr.u64	$temp,@AxB[0]#hi,#16	@ upper 32 bits of a[0]*b[0]
	vmull.u32	@AxB[6],$Bi,${A3}[0]
	vand.u64	@AxB[0],@AxB[0],$mask	@ lower 32 bits of a[0]*b[0]
	vmull.u32	@AxB[7],$Bi,${A3}[1]
for($i=1;$i<8;$i++) {
	vld1.32	{${Bi}[0]},[$bptr,:32]!
	veor	$zero,$zero,$zero
	vadd.u64	@AxB[1]#lo,@AxB[1]#lo,$temp	@ reduction
	vshl.u64	$mult,@AxB[0],#32
	vadd.u64	@AxB[3],@AxB[3],@AxB[0]
	vsub.u64	$mult,$mult,@AxB[0]
	vadd.u64	@AxB[6],@AxB[6],@AxB[0]
	vadd.u64	@AxB[7],@AxB[7],$mult
push(@AxB,shift(@AxB));
	vmlal.u32	@AxB[0],$Bi,${A0}[0]
	vmlal.u32	@AxB[1],$Bi,${A0}[1]
	vmlal.u32	@AxB[2],$Bi,${A1}[0]
	vmlal.u32	@AxB[3],$Bi,${A1}[1]
	vshr.u64	$temp,@AxB[0]#lo,#16
	vmlal.u32	@AxB[4],$Bi,${A2}[0]
	vadd.u64	@AxB[0]#hi,@AxB[0]#hi,$temp
	vmlal.u32	@AxB[5],$Bi,${A2}[1]
	vshr.u64	$temp,@AxB[0]#hi,#16	@ upper 33 bits of a[0]*b[i]+t[0]
	vmlal.u32	@AxB[6],$Bi,${A3}[0]
	vand.u64	@AxB[0],@AxB[0],$mask	@ lower 32 bits of a[0]*b[i]+t[0]
	vmull.u32	@AxB[7],$Bi,${A3}[1]
}
	vadd.u64	@AxB[1]#lo,@AxB[1]#lo,$temp	@ last reduction
	vshl.u64	$mult,@AxB[0],#32
	vadd.u64	@AxB[3],@AxB[3],@AxB[0]
	vsub.u64	$mult,$mult,@AxB[0]
	vadd.u64	@AxB[6],@AxB[6],@AxB[0]
	vadd.u64	@AxB[7],@AxB[7],$mult

	vshr.u64	$temp,@AxB[1]#lo,#16	@ convert
	vadd.u64	@AxB[1]#hi,@AxB[1]#hi,$temp
	vshr.u64	$temp,@AxB[1]#hi,#16
	vzip.16	@AxB[1]#lo,@AxB[1]#hi

foreach (2..7) {
	vadd.u64	@AxB[$_]#lo,@AxB[$_]#lo,$temp
	vst1.32	{@AxB[$_-1]#lo[0]},[$toutptr,:32]!
	vshr.u64	$temp,@AxB[$_]#lo,#16
	vadd.u64	@AxB[$_]#hi,@AxB[$_]#hi,$temp
	vshr.u64	$temp,@AxB[$_]#hi,#16
	vzip.16	@AxB[$_]#lo,@AxB[$_]#hi
}

	vst1.32	{@AxB[7]#lo[0]},[$toutptr,:32]!
	vst1.32	{$temp},[$toutptr]		@ upper 33 bits
	ldr	r9,[sp,#32]		@ top-most bit
	adcs	r7,r7,r9,lsr#31
.size	ecp_nistz256_mul_mont_neon,.-ecp_nistz256_mul_mont_neon
########################################################################
# The $aN assignment below matches the order in which the 256-bit result
# appears in the register bank at return from __ecp_nistz256_mul_mont,
# so that we can skip reloading it from memory. This means that the
# functions below use a custom calling sequence accepting the 256-bit
# input in registers, the output pointer in r0, $r_ptr, and an optional
# pointer in r2, $b_ptr.
#
# See their "normal" counterparts for insights on the calculations.

my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7,
    $t0,$t1,$t2,$t3)=map("r$_",(11,3..10,12,14,1));
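# For instance (as visible in the point routines below), a typical
# sequence is
#	bl	__ecp_nistz256_mul_mont		@ result left in $a0-$a7
#	add	$b_ptr,sp,#$tmp0
#	bl	__ecp_nistz256_sub_from		@ consumes it in registers
# with no reload of the 256-bit intermediate in between.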
.type	__ecp_nistz256_sub_from,%function
__ecp_nistz256_sub_from:
	str	lr,[sp,#-4]!		@ push lr
	ldr	$t3,[$b_ptr,#12]
	ldr	$t0,[$b_ptr,#16]
	ldr	$t1,[$b_ptr,#20]
	ldr	$t2,[$b_ptr,#24]
	ldr	$t3,[$b_ptr,#28]
	sbc	$ff,$ff,$ff		@ broadcast borrow bit
	ldr	lr,[sp],#4		@ pop lr

	adds	$a0,$a0,$ff		@ add synthesized modulus
	str	$a3,[$r_ptr,#12]
	str	$a4,[$r_ptr,#16]
	adcs	$a6,$a6,$ff,lsr#31
	str	$a5,[$r_ptr,#20]
	str	$a6,[$r_ptr,#24]
	str	$a7,[$r_ptr,#28]
.size	__ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
.type	__ecp_nistz256_sub_morf,%function
__ecp_nistz256_sub_morf:
	str	lr,[sp,#-4]!		@ push lr
	ldr	$t3,[$b_ptr,#12]
	ldr	$t0,[$b_ptr,#16]
	ldr	$t1,[$b_ptr,#20]
	ldr	$t2,[$b_ptr,#24]
	ldr	$t3,[$b_ptr,#28]
	sbc	$ff,$ff,$ff		@ broadcast borrow bit
	ldr	lr,[sp],#4		@ pop lr

	adds	$a0,$a0,$ff		@ add synthesized modulus
	str	$a3,[$r_ptr,#12]
	str	$a4,[$r_ptr,#16]
	adcs	$a6,$a6,$ff,lsr#31
	str	$a5,[$r_ptr,#20]
	str	$a6,[$r_ptr,#24]
	str	$a7,[$r_ptr,#28]
.size	__ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
.type	__ecp_nistz256_add_self,%function
__ecp_nistz256_add_self:
	adds	$a0,$a0,$a0		@ a[0:7]+=a[0:7]

	@ if a+b >= modulus, subtract modulus.
	@
	@ But since comparison implies subtraction, we subtract
	@ modulus and then add it back if the subtraction borrowed.

	@ Note that because the modulus has a special form, i.e. consists of
	@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	@ using the value of the borrow as a whole or extracting a single bit.
	@ Follow the $ff register...

	adds	$a0,$a0,$ff		@ add synthesized modulus
	str	$a3,[$r_ptr,#12]
	str	$a4,[$r_ptr,#16]
	adcs	$a6,$a6,$ff,lsr#31
	str	$a5,[$r_ptr,#20]
	str	$a6,[$r_ptr,#24]
	str	$a7,[$r_ptr,#28]
.size	__ecp_nistz256_add_self,.-__ecp_nistz256_add_self
########################################################################
# The following subroutines are "literal" implementations of those found
# in ecp_nistz256.c.
########################################################################
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);

my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
# The map() above describes the stack layout with 5 temporary
# 256-bit vectors on top. Note that we push starting from r0,
# which means that a copy of the input arguments sits just below
# these temporary vectors.
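# Sketch of the resulting frame (given the layout above):
#	sp+0	S		sp+96	in_x
#	sp+32	M		sp+128	tmp0
#	sp+64	Zsqr		sp+160	saved r0-r3
# hence the ldr $r_ptr,[sp,#32*5] / ldr $b_ptr,[sp,#32*5+4] below.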
.globl	ecp_nistz256_point_double
.type	ecp_nistz256_point_double,%function
ecp_nistz256_point_double:
	stmdb	sp!,{r0-r12,lr}		@ push from r0, unusual, but intentional
.Lpoint_double_shortcut:
	ldmia	$a_ptr!,{r4-r11}	@ copy in_x

	bl	__ecp_nistz256_mul_by_2	@ p256_mul_by_2(S, in_y);

	add	$b_ptr,$a_ptr,#32
	add	$a_ptr,$a_ptr,#32
	add	$r_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Zsqr, in_z);

	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(S, S);

	ldr	$b_ptr,[sp,#32*5+4]
	add	$a_ptr,$b_ptr,#32
	add	$b_ptr,$b_ptr,#64
	add	$r_ptr,sp,#$tmp0
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(tmp0, in_z, in_y);

	ldr	$r_ptr,[sp,#32*5]
	add	$r_ptr,$r_ptr,#64
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(res_z, tmp0);

	add	$a_ptr,sp,#$in_x
	add	$b_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_add	@ p256_add(M, in_x, Zsqr);

	add	$a_ptr,sp,#$in_x
	add	$b_ptr,sp,#$Zsqr
	add	$r_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_sub	@ p256_sub(Zsqr, in_x, Zsqr);

	add	$r_ptr,sp,#$tmp0
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(tmp0, S);

	add	$a_ptr,sp,#$Zsqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(M, M, Zsqr);

	ldr	$r_ptr,[sp,#32*5]
	add	$a_ptr,sp,#$tmp0
	add	$r_ptr,$r_ptr,#32
	bl	__ecp_nistz256_div_by_2	@ p256_div_by_2(res_y, tmp0);

	bl	__ecp_nistz256_mul_by_3	@ p256_mul_by_3(M, M);

	add	$a_ptr,sp,#$in_x
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S, S, in_x);

	add	$r_ptr,sp,#$tmp0
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(tmp0, S);

	ldr	$r_ptr,[sp,#32*5]
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(res_x, M);

	add	$b_ptr,sp,#$tmp0
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_x, res_x, tmp0);

	bl	__ecp_nistz256_sub_morf	@ p256_sub(S, S, res_x);

	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S, S, M);

	ldr	$r_ptr,[sp,#32*5]
	add	$b_ptr,$r_ptr,#32
	add	$r_ptr,$r_ptr,#32
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_y, S, res_y);

	add	sp,sp,#32*5+16		@ +16 means "skip over the saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_point_double,.-ecp_nistz256_point_double
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#			      const P256_POINT *in2);

my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,$in2_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
# The map() above describes the stack layout with 18 temporary
# 256-bit vectors on top. Note that we push starting from r0,
# which means that a copy of the input arguments sits just below
# these temporary vectors. We use three extra words for ~in1infty,
# ~in2infty and the result of the check for zero.
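# Concretely (given the offsets used below): sp+32*18+4, +8 and +12
# hold ~in1infty, ~in2infty and ~is_equal(S1,S2), while the saved r0
# (output pointer) is reloaded from sp+32*18+16.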
.globl	ecp_nistz256_point_add
.type	ecp_nistz256_point_add,%function
ecp_nistz256_point_add:
	stmdb	sp!,{r0-r12,lr}		@ push from r0, unusual, but intentional
	ldmia	$b_ptr!,{r4-r11}	@ copy in2_x
	ldmia	$b_ptr!,{r4-r11}	@ copy in2_y
	ldmia	$b_ptr,{r4-r11}		@ copy in2_z
	str	r12,[sp,#32*18+8]	@ ~in2infty
	ldmia	$a_ptr!,{r4-r11}	@ copy in1_x
	ldmia	$a_ptr!,{r4-r11}	@ copy in1_y
	ldmia	$a_ptr,{r4-r11}		@ copy in1_z
	str	r12,[sp,#32*18+4]	@ ~in1infty

	add	$a_ptr,sp,#$in2_z
	add	$b_ptr,sp,#$in2_z
	add	$r_ptr,sp,#$Z2sqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Z2sqr, in2_z);

	add	$a_ptr,sp,#$in1_z
	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Z1sqr, in1_z);

	add	$a_ptr,sp,#$in2_z
	add	$b_ptr,sp,#$Z2sqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S1, Z2sqr, in2_z);

	add	$a_ptr,sp,#$in1_z
	add	$b_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, Z1sqr, in1_z);

	add	$a_ptr,sp,#$in1_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S1, S1, in1_y);

	add	$a_ptr,sp,#$in2_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, S2, in2_y);

	bl	__ecp_nistz256_sub_from	@ p256_sub(R, S2, S1);

	orr	$a0,$a0,$a1		@ see if result is zero
	add	$a_ptr,sp,#$in1_x
	add	$b_ptr,sp,#$Z2sqr
	str	$a0,[sp,#32*18+12]

	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U1, in1_x, Z2sqr);

	add	$a_ptr,sp,#$in2_x
	add	$b_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, in2_x, Z1sqr);

	bl	__ecp_nistz256_sub_from	@ p256_sub(H, U2, U1);

	orr	$a0,$a0,$a1		@ see if result is zero
	orr	$a0,$a0,$a4		@ ~is_equal(U1,U2)

	ldr	$t0,[sp,#32*18+4]	@ ~in1infty
	ldr	$t1,[sp,#32*18+8]	@ ~in2infty
	ldr	$t2,[sp,#32*18+12]	@ ~is_equal(S1,S2)
	mvn	$t0,$t0			@ -1/0 -> 0/-1
	mvn	$t1,$t1			@ -1/0 -> 0/-1
	orrs	$a0,$a0,$t2		@ set flags

	@ if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))

	ldr	$a_ptr,[sp,#32*18+20]
	add	sp,sp,#32*(18-5)+16	@ difference in frame sizes
	b	.Lpoint_double_shortcut

	add	$r_ptr,sp,#$Rsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Rsqr, R);

	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$res_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_z, H, in1_z);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Hsqr, H);

	add	$a_ptr,sp,#$in2_z
	add	$b_ptr,sp,#$res_z
	add	$r_ptr,sp,#$res_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_z, res_z, in2_z);

	add	$b_ptr,sp,#$Hsqr
	add	$r_ptr,sp,#$Hcub
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(Hcub, Hsqr, H);

	add	$a_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, U1, Hsqr);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(Hsqr, U2);

	add	$b_ptr,sp,#$Rsqr
	add	$r_ptr,sp,#$res_x
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_x, Rsqr, Hsqr);

	add	$b_ptr,sp,#$Hcub
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_x, res_x, Hcub);

	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_y, U2, res_x);

	add	$a_ptr,sp,#$Hcub
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, S1, Hcub);

	add	$b_ptr,sp,#$res_y
	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_y, res_y, R);

	bl	__ecp_nistz256_sub_from	@ p256_sub(res_y, res_y, S2);

	ldr	r11,[sp,#32*18+4]	@ ~in1infty
	ldr	r12,[sp,#32*18+8]	@ ~in2infty
	and	r10,r11,r12		@ ~in1infty & ~in2infty
	and	r11,r11,r12		@ in1infty & ~in2infty
	mvn	r12,r12			@ in2infty
	ldr	$r_ptr,[sp,#32*18+16]

for($i=0;$i<96;$i+=8) {			# conditional moves
	ldmia	r1!,{r4-r5}		@ res_x
	ldmia	r2!,{r6-r7}		@ in2_x
	ldmia	r3!,{r8-r9}		@ in1_x
	and	r4,r4,r10		@ ~in1infty & ~in2infty
	and	r6,r6,r11		@ in1infty & ~in2infty
	and	r8,r8,r12		@ in2infty
	stmia	$r_ptr!,{r4-r5}
}

	add	sp,sp,#32*18+16+16	@ +16 means "skip over the saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_point_add,.-ecp_nistz256_point_add
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#				     const P256_POINT_AFFINE *in2);

my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
my $Z1sqr = $S2;
# The map() above describes the stack layout with 15 temporary
# 256-bit vectors on top. Note that we push starting from r0,
# which means that a copy of the input arguments sits just below
# these temporary vectors. We use two extra words for ~in1infty
# and ~in2infty.

my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
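# @ONE_mont is 1 in Montgomery representation, i.e. 2^256 mod p =
# 2^224 - 2^192 - 2^96 + 1, whose 32-bit words (least significant
# first) are exactly (1,0,0,-1,-1,-1,-2,0).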
.globl	ecp_nistz256_point_add_affine
.type	ecp_nistz256_point_add_affine,%function
ecp_nistz256_point_add_affine:
	stmdb	sp!,{r0-r12,lr}		@ push from r0, unusual, but intentional
	ldmia	$a_ptr!,{r4-r11}	@ copy in1_x
	ldmia	$a_ptr!,{r4-r11}	@ copy in1_y
	ldmia	$a_ptr,{r4-r11}		@ copy in1_z
	str	r12,[sp,#32*15+4]	@ ~in1infty
	ldmia	$b_ptr!,{r4-r11}	@ copy in2_x
	ldmia	$b_ptr!,{r4-r11}	@ copy in2_y
	str	r12,[sp,#32*15+8]	@ ~in2infty

	add	$a_ptr,sp,#$in1_z
	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$Z1sqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Z1sqr, in1_z);

	add	$a_ptr,sp,#$Z1sqr
	add	$b_ptr,sp,#$in2_x
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, Z1sqr, in2_x);

	add	$b_ptr,sp,#$in1_x
	bl	__ecp_nistz256_sub_from	@ p256_sub(H, U2, in1_x);

	add	$a_ptr,sp,#$Z1sqr
	add	$b_ptr,sp,#$in1_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, Z1sqr, in1_z);

	add	$b_ptr,sp,#$in1_z
	add	$r_ptr,sp,#$res_z
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_z, H, in1_z);

	add	$a_ptr,sp,#$in2_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, S2, in2_y);

	add	$b_ptr,sp,#$in1_y
	bl	__ecp_nistz256_sub_from	@ p256_sub(R, S2, in1_y);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Hsqr, H);

	add	$r_ptr,sp,#$Rsqr
	bl	__ecp_nistz256_mul_mont	@ p256_sqr_mont(Rsqr, R);

	add	$b_ptr,sp,#$Hsqr
	add	$r_ptr,sp,#$Hcub
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(Hcub, Hsqr, H);

	add	$a_ptr,sp,#$Hsqr
	add	$b_ptr,sp,#$in1_x
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(U2, in1_x, Hsqr);

	add	$r_ptr,sp,#$Hsqr
	bl	__ecp_nistz256_add_self	@ p256_mul_by_2(Hsqr, U2);

	add	$b_ptr,sp,#$Rsqr
	add	$r_ptr,sp,#$res_x
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_x, Rsqr, Hsqr);

	add	$b_ptr,sp,#$Hcub
	bl	__ecp_nistz256_sub_from	@ p256_sub(res_x, res_x, Hcub);

	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_sub_morf	@ p256_sub(res_y, U2, res_x);

	add	$a_ptr,sp,#$Hcub
	add	$b_ptr,sp,#$in1_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(S2, in1_y, Hcub);

	add	$b_ptr,sp,#$res_y
	add	$r_ptr,sp,#$res_y
	bl	__ecp_nistz256_mul_mont	@ p256_mul_mont(res_y, res_y, R);

	bl	__ecp_nistz256_sub_from	@ p256_sub(res_y, res_y, S2);

	ldr	r11,[sp,#32*15+4]	@ ~in1infty
	ldr	r12,[sp,#32*15+8]	@ ~in2infty
	and	r10,r11,r12		@ ~in1infty & ~in2infty
	and	r11,r11,r12		@ in1infty & ~in2infty
	mvn	r12,r12			@ in2infty
	ldr	$r_ptr,[sp,#32*15]

for($i=0;$i<64;$i+=8) {			# conditional moves
	ldmia	r1!,{r4-r5}		@ res_x
	ldmia	r2!,{r6-r7}		@ in2_x
	ldmia	r3!,{r8-r9}		@ in1_x
	and	r4,r4,r10		@ ~in1infty & ~in2infty
	and	r6,r6,r11		@ in1infty & ~in2infty
	and	r8,r8,r12		@ in2infty
	stmia	$r_ptr!,{r4-r5}
}
for(;$i<96;$i+=8) {
my $j=($i-64)/4;
	ldmia	r1!,{r4-r5}		@ res_z
	ldmia	r3!,{r8-r9}		@ in1_z
	and	r6,r11,#@ONE_mont[$j]
	and	r7,r11,#@ONE_mont[$j+1]
	stmia	$r_ptr!,{r4-r5}
}

	add	sp,sp,#32*15+16		@ +16 means "skip over the saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";	# enforce flush