# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to the pre-bn_mul_mont
# code base and compiler-generated code with in-lined umull and even
# umlal instructions. The latter means that this code didn't really
# have an "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. A Windows CE port would be trivial, as it's exclusively
# about decorations; ABI and instruction syntax are identical.
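#
# (For reference: umull rlo,rhi,ra,rb computes the full 64-bit product
# rhi:rlo = ra*rb, while umlal accumulates into an existing 64-bit
# value, rhi:rlo += ra*rb. These two are the only multiplication
# primitives the integer code path below relies on.)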
# November 2013.
#
# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 the improvement was measured to vary from ~70% to
# an incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for integer-only code. The code is chosen
# for execution on all NEON-capable processors, because the gain on
# others outweighs the marginal loss on Cortex-A9.
# September 2015.
#
# Align Cortex-A9 performance with the November 2013 improvements, i.e.
# NEON code is now ~20-105% faster than the integer-only one on this
# processor. But this optimization further improved performance even
# on other processors: the NEON code path is ~45-180% faster than the
# original integer-only on Cortex-A8, ~10-210% on Cortex-A15, and
# ~70-450% on Snapdragon S4.
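
# The reference model below is illustrative only and is not used by this
# module (a hedged sketch: bn_mont_ref is a name local to this example,
# and a 64-bit perl is assumed so that 32x32-bit products stay exact).
# Inputs are little-endian arrays of 32-bit words, $n0 = -1/$np->[0]
# mod 2^32, and the return value is a*b*2^(-32*num) mod n, which is
# what the generated bn_mul_mont computes word by word.
sub bn_mont_ref {
	my ($ap,$bp,$np,$n0,$num)=@_;
	my @tp=(0)x($num+2);			# tp, plus carry/overflow words

	for (my $i=0; $i<$num; $i++) {
		my $c=0;
		for (my $j=0; $j<$num; $j++) {	# tp += ap*bp[i]
			my $v=$tp[$j]+$ap->[$j]*$bp->[$i]+$c;
			$tp[$j]=$v&0xffffffff;	$c=$v>>32;
		}
		my $v=$tp[$num]+$c;
		$tp[$num]=$v&0xffffffff;	$tp[$num+1]=$v>>32;

		my $m=($tp[0]*$n0)&0xffffffff;	# "tp[0]"*n0
		$c=($tp[0]+$np->[0]*$m)>>32;	# tp[0] is annihilated
		for (my $j=1; $j<$num; $j++) {	# tp = (tp+np*m)/2^32
			$v=$tp[$j]+$np->[$j]*$m+$c;
			$tp[$j-1]=$v&0xffffffff; $c=$v>>32;
		}
		$v=$tp[$num]+$c;
		$tp[$num-1]=$v&0xffffffff;
		$tp[$num]=$tp[$num+1]+($v>>32);
	}

	my ($b,@rp)=(0);			# conditional final subtraction
	for (my $j=0; $j<$num; $j++) {
		my $v=$tp[$j]-$np->[$j]-$b;
		$rp[$j]=$v&0xffffffff;	$b=$v<0?1:0;
	}
	return $tp[$num]>=$b ? \@rp : [@tp[0..$num-1]];
}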
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter
#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;
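
# A sketch of the resulting frame (derived from the offsets above and
# the prologue below; the saved {r0,r2} pair is rp and bp, and n0/num
# are the caller's stack arguments):
#
#	sp		tp[0]
#	...
#	$num		tp[num-1], offsets below are relative to here
#	+4		tp[num]
#	+8..+44		saved r4-r12,lr
#	+48 ($_rp)	rp, saved r0
#	+52 ($_bp)	bp, saved r2
#	+56 ($_n0)	&n0
#	+60 ($_num)	num argument, reused as $_bpend, &bp[num]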
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.Lbn_mul_mont
#endif
.global	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
bn_mul_mont:
.Lbn_mul_mont:
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_MAX_ARCH__>=7
	tst	ip,#7			@ NEON path handles num%8==0 only
	bne	.Lialu
	adr	r0,.Lbn_mul_mont
	ldr	r2,.LOPENSSL_armcap
	ldr	r0,[r0,r2]
	tst	r0,#ARMV7_NEON		@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	$num,ip			@ load num
#ifdef	__thumb2__
	ittt	lt
#endif
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt
	stmdb	sp!,{r4-r12,lr}		@ save 10 registers
	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]
	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$n0,[$_n0]		@ &n0
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	ldr	$n0,[$n0]		@ *n0
	str	$tp,[$_bpend]		@ save &bp[num]
	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov	$tp,sp
.L1st:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	mov	$alo,$ahi
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.L1st
	adds	$nlo,$nlo,$ahi
	ldr	$tp,[$_bp]		@ restore bp
	mov	$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	mov	$tj,sp
	str	$nhi,[$num,#4]		@ tp[num]=
.Louter:
	sub	$tj,$num,$tj		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	mul	$n0,$alo,$n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov	$tp,sp
.Linner:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	adc	$ahi,$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	adc	$nlo,$nhi,#0
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	cmp	$tp,$num
	bne	.Linner
	adds	$nlo,$nlo,$ahi
	mov	$nhi,#0
	ldr	$tp,[$_bp]		@ restore bp
	adc	$nhi,$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adds	$nlo,$nlo,$tj
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	cmp	$tp,$tj
#ifdef	__thumb2__
	itt	ne
#endif
	movne	$tj,sp
	bne	.Louter
	ldr	$rp,[$_rp]		@ pull rp
	mov	$aj,sp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,$aj		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]
	subs	$tj,$tj,$tj		@ "clear" carry flag
.Lsub:	ldr	$tj,[$tp],#4
	ldr	$nj,[$np],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	bne	.Lsub
	sbcs	$nhi,$nhi,#0		@ upmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp
	and	$ap,$tp,$nhi
	bic	$np,$rp,$nhi
	orr	$ap,$ap,$np		@ ap=borrow?tp:rp

.Lcopy:	ldr	$tj,[$ap],#4		@ copy or in-place refresh
	str	sp,[$tp],#4		@ zap tp
	str	$tj,[$rp],#4
	cmp	$tp,$num
	bne	.Lcopy
	mov	sp,$num
	add	sp,sp,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:
#if __ARM_ARCH__>=5
	ret				@ bx lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_mul_mont,.-bn_mul_mont
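
# For reference, the routine above fills the bn_mul_mont contract used
# throughout OpenSSL's BIGNUM code; assuming a 32-bit BN_ULONG on ARM,
# the C-side declaration is equivalent to
#
#	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap,
#			const BN_ULONG *bp, const BN_ULONG *np,
#			const BN_ULONG *n0, int num);
#
# where a zero return (num<2 here) tells the caller to fall back to the
# generic C path.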
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my @ACC=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero="$Z#lo";
my $temp="$Z#hi";

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block
	mov	ip,sp

	cmp	$num,#8
	bhi	.LNEON_8n

	@ special case for $num==8, everything is in register bank...
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	sub		$toutptr,sp,$num,lsl#4
	vld1.32		{$A0-$A3}, [$aptr]!	@ can't specify :32 :-(
	and		$toutptr,$toutptr,#-64
	vld1.32		{${M0}[0]}, [$n0,:32]
	mov		sp,$toutptr		@ alloca
	vzip.16		$Bi,$zero
	vmull.u32	@ACC[0],$Bi,${A0}[0]
	vmull.u32	@ACC[1],$Bi,${A0}[1]
	vmull.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmull.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	vmul.u32	$Ni,$Ni,$M0

	vmull.u32	@ACC[4],$Bi,${A2}[0]
	vld1.32		{$N0-$N3}, [$nptr]!
	vmull.u32	@ACC[5],$Bi,${A2}[1]
	vmull.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmull.u32	@ACC[7],$Bi,${A3}[1]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	vshr.u64	$temp,$temp,#16
	vadd.u64	$temp,$temp,$Temp#hi
	vshr.u64	$temp,$temp,#16

	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	subs		$outer,$outer,#1
	vmul.u32	$Ni,$Ni,$M0

	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	vshr.u64	$temp,$temp,#16
	vadd.u64	$temp,$temp,$Temp#hi
	vshr.u64	$temp,$temp,#16
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vshr.u64	$temp,@ACC[0]#hi,#16
	vzip.16		@ACC[0]#lo,@ACC[0]#hi
.align	4
.LNEON_8n:
	veor		@ACC[0],@ACC[0],@ACC[0]
	sub		$toutptr,sp,#128
	veor		@ACC[1],@ACC[1],@ACC[1]
	sub		$toutptr,$toutptr,$num,lsl#4
	veor		@ACC[2],@ACC[2],@ACC[2]
	and		$toutptr,$toutptr,#-64
	veor		@ACC[3],@ACC[3],@ACC[3]
	mov		sp,$toutptr		@ alloca
	veor		@ACC[4],@ACC[4],@ACC[4]
	add		$toutptr,$toutptr,#256
	veor		@ACC[5],@ACC[5],@ACC[5]
	sub		$inner,$num,#8
	veor		@ACC[6],@ACC[6],@ACC[6]
	veor		@ACC[7],@ACC[7],@ACC[7]
.LNEON_8n_init:
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	subs		$inner,$inner,#8
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]-@ACC[7]},[$toutptr,:256]!
	bne		.LNEON_8n_init
	vld1.32		{$A0-$A3},[$aptr]!
	add		$tinptr,sp,#256
	vld1.32		{${M0}[0]},[$n0,:32]
	mov		$outer,$num

.LNEON_8n_outer:
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	veor		$zero,$zero,$zero
	mov		$toutptr,sp
	vld1.32		{$N0-$N3},[$nptr]!
	add		$bnptr,sp,#8
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[sp,:64]		@ put aside smashed b[8*i+0]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	veor		$temp,$temp,$temp
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vzip.16		$Bi,$temp
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]!	@ put aside smashed m[8*i+$i]
	push(@ACC,shift(@ACC));	$i++;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]!
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[$bnptr,:64]!	@ put aside smashed b[8*i+$i]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]	@ put aside smashed m[8*i+$i]
	add		$bnptr,sp,#8		@ rewind
	push(@ACC,shift(@ACC));

.LNEON_8n_inner:
	subs		$inner,$inner,#8
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+0]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vld1.32		{$N0-$N3},[$nptr]!
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
for ($i=1; $i<8; $i++) {
	vld1.32		{$Bi},[$bnptr,:64]!	@ pull smashed b[8*i+$i]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
	push(@ACC,shift(@ACC));
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+$i]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
}

	it		eq
	subeq		$aptr,$aptr,$num,lsl#2	@ rewind
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	add		$bnptr,sp,#8		@ rewind
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	bne		.LNEON_8n_inner
	push(@ACC,shift(@ACC));
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	veor		q2,q2,q2		@ $N0-$N1
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	veor		q3,q3,q3		@ $N2-$N3
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]},[$toutptr,:128]
	subs		$outer,$outer,#8
	vld1.64		{@ACC[0]-@ACC[1]},[$tinptr,:256]!
	vld1.64		{@ACC[2]-@ACC[3]},[$tinptr,:256]!
	vld1.64		{@ACC[4]-@ACC[5]},[$tinptr,:256]!
	vld1.64		{@ACC[6]-@ACC[7]},[$tinptr,:256]!
	it		ne
	subne		$nptr,$nptr,$num,lsl#2	@ rewind
	bne		.LNEON_8n_outer
	vst1.64		{q2-q3}, [sp,:256]!	@ start wiping stack frame
	vshr.u64	$temp,@ACC[0]#lo,#16
	vst1.64		{q2-q3}, [sp,:256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vst1.64		{q2-q3}, [sp,:256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vst1.64		{q2-q3}, [sp,:256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi
.LNEON_tail:
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vld1.64		{@ACC[2]-@ACC[3]}, [$tinptr, :256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vld1.64		{@ACC[4]-@ACC[5]}, [$tinptr, :256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vld1.64		{@ACC[6]-@ACC[7]}, [$tinptr, :256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi
for ($i=1; $i<8; $i++) {
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,$temp
	vst1.32		{@ACC[0]#lo[0]}, [$toutptr, :32]!
	vshr.u64	$temp,@ACC[1]#lo,#16
	vadd.u64	@ACC[1]#hi,@ACC[1]#hi,$temp
	vshr.u64	$temp,@ACC[1]#hi,#16
	vzip.16		@ACC[1]#lo,@ACC[1]#hi
	push(@ACC,shift(@ACC));
}
	push(@ACC,shift(@ACC));
	vld1.64		{@ACC[0]-@ACC[1]}, [$tinptr, :256]!
	subs		$inner,$inner,#8
	vst1.32		{@ACC[7]#lo[0]}, [$toutptr, :32]!
	bne		.LNEON_tail

	vst1.32		{${temp}[0]}, [$toutptr, :32]	@ top-most bit
	sub		$nptr,$nptr,$num,lsl#2	@ rewind $nptr
	subs		$aptr,sp,#0		@ clear carry flag
	add		$bptr,sp,$num,lsl#2
.LNEON_sub:
	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	$aptr,$bptr		@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_sub
	ldr	r10, [$aptr]		@ load top-most bit
	mov	r11,sp
	veor	q0,q0,q0
	sub	r11,$bptr,r11		@ this is num*4
	veor	q1,q1,q1
	mov	$aptr,sp
	sub	$rptr,$rptr,r11		@ rewind $rptr
	mov	$nptr,$bptr		@ second 3/4th of frame
	sbcs	r10,r10,#0		@ result is carry flag
.LNEON_copy_n_zap:
	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr,  {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$nptr,:256]!	@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!	@ wipe
	it	cc
	movcc	r11,r7
	ldmia	$aptr, {r4-r7}
	stmia	$rptr!, {r8-r11}
	sub	$aptr,$aptr,#16
	ldmia	$rptr, {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$aptr,:256]!	@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!	@ wipe
	it	cc
	movcc	r11,r7
	teq	$aptr,$bptr		@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap
	mov	sp,ip
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	ret				@ bx lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge	or
	s/\bret\b/bx	lr/g						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/g;	# make it possible to compile with -march=armv4

	print $_,"\n";
}

close STDOUT;
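
# For example, the register-notation substitution above rewrites "q5#lo"
# to "d10" and "q5#hi" to "d11", matching the standard NEON aliasing in
# which q<n> overlays d<2n> (low half) and d<2n+1> (high half).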