1 #! /usr/bin/env perl
2 # Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9
10 # ====================================================================
11 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16
17 # April 2006
18
19 # "Teaser" Montgomery multiplication module for PowerPC. It's possible
20 # to gain a bit more by modulo-scheduling outer loop, then dedicated
21 # squaring procedure should give further 20% and code can be adapted
22 # for 32-bit application running on 64-bit CPU. As for the latter.
23 # It won't be able to achieve "native" 64-bit performance, because in
24 # 32-bit application context every addc instruction will have to be
25 # expanded as addc, twice right shift by 32 and finally adde, etc.
26 # So far the RSA *sign* performance improvement over pre-bn_mul_mont asm
27 # for a 64-bit application running on PPC970/G5 is:
28 #
29 # 512-bit +65%
30 # 1024-bit +35%
31 # 2048-bit +18%
32 # 4096-bit +4%
33
34 # September 2016
35 #
36 # Add a multiplication procedure operating on lengths divisible by 4,
37 # and a squaring procedure operating on lengths divisible by 8. Length
38 # is expressed in number of limbs. RSA private key operations are
39 # ~35-50% faster (more for longer keys) on contemporary high-end POWER
40 # processors in 64-bit builds, and [mysteriously enough] even more in
41 # 32-bit builds. On low-end 32-bit processors the performance
42 # improvement turned out to be marginal...
43
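# For reference, a minimal big-number sketch (plain Perl, kept as a
# comment and never executed) of what bn_mul_mont computes:
# a*b*R^-1 mod n, with R = 2^(w*num) and n0 == -n^-1 mod 2^w. The sub
# name and variable names below are illustrative, not part of this module.
#
#	use Math::BigInt;
#	sub mont_mul_ref {
#	    my ($a, $b, $n, $n0, $num, $w) = @_;	# Math::BigInt operands
#	    my $mask = Math::BigInt->new(1)->blsft($w)->bsub(1);
#	    my $t = Math::BigInt->bzero();
#	    for my $i (0 .. $num-1) {
#		my $bi = $b->copy->brsft($w*$i)->band($mask);	# b[i]
#		$t->badd($a->copy->bmul($bi));			# t += a*b[i]
#		my $m = $t->copy->band($mask)->bmul($n0)->band($mask); # "t[0]"*n0
#		$t->badd($m->bmul($n))->brsft($w);		# t = (t+m*n)/2^w
#	    }
#	    $t->bsub($n) if $t->bcmp($n) >= 0;	# final conditional subtraction
#	    return $t;				# == a*b*R^-1 mod n
#	}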
44 $flavour = shift;
45
46 if ($flavour =~ /32/) {
47 $BITS= 32;
48 $BNSZ= $BITS/8;
49 $SIZE_T=4;
50 $RZONE= 224;
51
52 $LD= "lwz"; # load
53 $LDU= "lwzu"; # load and update
54 $LDX= "lwzx"; # load indexed
55 $ST= "stw"; # store
56 $STU= "stwu"; # store and update
57 $STX= "stwx"; # store indexed
58 $STUX= "stwux"; # store indexed and update
59 $UMULL= "mullw"; # unsigned multiply low
60 $UMULH= "mulhwu"; # unsigned multiply high
61 $UCMP= "cmplw"; # unsigned compare
62 $SHRI= "srwi"; # unsigned shift right by immediate
63 $SHLI= "slwi"; # unsigned shift left by immediate
64 $PUSH= $ST;
65 $POP= $LD;
66 } elsif ($flavour =~ /64/) {
67 $BITS= 64;
68 $BNSZ= $BITS/8;
69 $SIZE_T=8;
70 $RZONE= 288;
71
72 # same as above, but 64-bit mnemonics...
73 $LD= "ld"; # load
74 $LDU= "ldu"; # load and update
75 $LDX= "ldx"; # load indexed
76 $ST= "std"; # store
77 $STU= "stdu"; # store and update
78 $STX= "stdx"; # store indexed
79 $STUX= "stdux"; # store indexed and update
80 $UMULL= "mulld"; # unsigned multiply low
81 $UMULH= "mulhdu"; # unsigned multiply high
82 $UCMP= "cmpld"; # unsigned compare
83 $SHRI= "srdi"; # unsigned shift right by immediate
84 $SHLI= "sldi"; # unsigned shift left by immediate
85 $PUSH= $ST;
86 $POP= $LD;
87 } else { die "nonsense $flavour"; }
88
89 $FRAME=8*$SIZE_T+$RZONE;
90 $LOCALS=8*$SIZE_T;
91
92 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
93 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
94 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
95 die "can't locate ppc-xlate.pl";
96
97 open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
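# (A typical invocation, assuming the usual perlasm flavour names handled
# by ppc-xlate.pl, is e.g. "perl ppc-mont.pl linux64 ppc-mont.s"; any
# flavour string matching /32/ or /64/ selects the tables above.)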
98
99 $sp="r1";
100 $toc="r2";
101 $rp="r3";
102 $ap="r4";
103 $bp="r5";
104 $np="r6";
105 $n0="r7";
106 $num="r8";
107
108 {
109 my $ovf=$rp;
110 my $rp="r9"; # $rp is reassigned
111 my $aj="r10";
112 my $nj="r11";
113 my $tj="r12";
114 # non-volatile registers
115 my $i="r20";
116 my $j="r21";
117 my $tp="r22";
118 my $m0="r23";
119 my $m1="r24";
120 my $lo0="r25";
121 my $hi0="r26";
122 my $lo1="r27";
123 my $hi1="r28";
124 my $alo="r29";
125 my $ahi="r30";
126 my $nlo="r31";
127 #
128 my $nhi="r0";
129
130 $code=<<___;
131 .machine "any"
132 .text
133
134 .globl .bn_mul_mont_int
135 .align 5
136 .bn_mul_mont_int:
137 cmpwi $num,4
138 mr $rp,r3 ; $rp is reassigned
139 li r3,0
140 bltlr
141 ___
142 $code.=<<___ if ($BNSZ==4);
143 cmpwi $num,32 ; longer key performance is not better
144 bgelr
145 ___
146 $code.=<<___;
147 slwi $num,$num,`log($BNSZ)/log(2)`
148 li $tj,-4096
149 addi $ovf,$num,$FRAME
150 subf $ovf,$ovf,$sp ; $sp-$ovf
151 and $ovf,$ovf,$tj ; minimize TLB usage
152 subf $ovf,$sp,$ovf ; $ovf-$sp
153 mr $tj,$sp
154 srwi $num,$num,`log($BNSZ)/log(2)`
155 $STUX $sp,$sp,$ovf
156
157 $PUSH r20,`-12*$SIZE_T`($tj)
158 $PUSH r21,`-11*$SIZE_T`($tj)
159 $PUSH r22,`-10*$SIZE_T`($tj)
160 $PUSH r23,`-9*$SIZE_T`($tj)
161 $PUSH r24,`-8*$SIZE_T`($tj)
162 $PUSH r25,`-7*$SIZE_T`($tj)
163 $PUSH r26,`-6*$SIZE_T`($tj)
164 $PUSH r27,`-5*$SIZE_T`($tj)
165 $PUSH r28,`-4*$SIZE_T`($tj)
166 $PUSH r29,`-3*$SIZE_T`($tj)
167 $PUSH r30,`-2*$SIZE_T`($tj)
168 $PUSH r31,`-1*$SIZE_T`($tj)
169
170 $LD $n0,0($n0) ; pull n0[0] value
171 addi $num,$num,-2 ; adjust $num for counter register
172 \f
173 $LD $m0,0($bp) ; m0=bp[0]
174 $LD $aj,0($ap) ; ap[0]
175 addi $tp,$sp,$LOCALS
176 $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0]
177 $UMULH $hi0,$aj,$m0
178
179 $LD $aj,$BNSZ($ap) ; ap[1]
180 $LD $nj,0($np) ; np[0]
181
182 $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0
183
184 $UMULL $alo,$aj,$m0 ; ap[1]*bp[0]
185 $UMULH $ahi,$aj,$m0
186
187 $UMULL $lo1,$nj,$m1 ; np[0]*m1
188 $UMULH $hi1,$nj,$m1
189 $LD $nj,$BNSZ($np) ; np[1]
190 addc $lo1,$lo1,$lo0
191 addze $hi1,$hi1
192
193 $UMULL $nlo,$nj,$m1 ; np[1]*m1
194 $UMULH $nhi,$nj,$m1
195
196 mtctr $num
197 li $j,`2*$BNSZ`
198 .align 4
199 L1st:
200 $LDX $aj,$ap,$j ; ap[j]
201 addc $lo0,$alo,$hi0
202 $LDX $nj,$np,$j ; np[j]
203 addze $hi0,$ahi
204 $UMULL $alo,$aj,$m0 ; ap[j]*bp[0]
205 addc $lo1,$nlo,$hi1
206 $UMULH $ahi,$aj,$m0
207 addze $hi1,$nhi
208 $UMULL $nlo,$nj,$m1 ; np[j]*m1
209 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
210 $UMULH $nhi,$nj,$m1
211 addze $hi1,$hi1
212 $ST $lo1,0($tp) ; tp[j-1]
213
214 addi $j,$j,$BNSZ ; j++
215 addi $tp,$tp,$BNSZ ; tp++
216 bdnz L1st
217 ;L1st
218 addc $lo0,$alo,$hi0
219 addze $hi0,$ahi
220
221 addc $lo1,$nlo,$hi1
222 addze $hi1,$nhi
223 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
224 addze $hi1,$hi1
225 $ST $lo1,0($tp) ; tp[j-1]
226
227 li $ovf,0
228 addc $hi1,$hi1,$hi0
229 addze $ovf,$ovf ; topmost overflow bit
230 $ST $hi1,$BNSZ($tp)
231 \f
232 li $i,$BNSZ
233 .align 4
234 Louter:
235 $LDX $m0,$bp,$i ; m0=bp[i]
236 $LD $aj,0($ap) ; ap[0]
237 addi $tp,$sp,$LOCALS
238 $LD $tj,$LOCALS($sp); tp[0]
239 $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i]
240 $UMULH $hi0,$aj,$m0
241 $LD $aj,$BNSZ($ap) ; ap[1]
242 $LD $nj,0($np) ; np[0]
243 addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0]
244 $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
245 addze $hi0,$hi0
246 $UMULL $m1,$lo0,$n0 ; tp[0]*n0
247 $UMULH $ahi,$aj,$m0
248 $UMULL $lo1,$nj,$m1 ; np[0]*m1
249 $UMULH $hi1,$nj,$m1
250 $LD $nj,$BNSZ($np) ; np[1]
251 addc $lo1,$lo1,$lo0
252 $UMULL $nlo,$nj,$m1 ; np[1]*m1
253 addze $hi1,$hi1
254 $UMULH $nhi,$nj,$m1
255 \f
256 mtctr $num
257 li $j,`2*$BNSZ`
258 .align 4
259 Linner:
260 $LDX $aj,$ap,$j ; ap[j]
261 addc $lo0,$alo,$hi0
262 $LD $tj,$BNSZ($tp) ; tp[j]
263 addze $hi0,$ahi
264 $LDX $nj,$np,$j ; np[j]
265 addc $lo1,$nlo,$hi1
266 $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
267 addze $hi1,$nhi
268 $UMULH $ahi,$aj,$m0
269 addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
270 $UMULL $nlo,$nj,$m1 ; np[j]*m1
271 addze $hi0,$hi0
272 $UMULH $nhi,$nj,$m1
273 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
274 addi $j,$j,$BNSZ ; j++
275 addze $hi1,$hi1
276 $ST $lo1,0($tp) ; tp[j-1]
277 addi $tp,$tp,$BNSZ ; tp++
278 bdnz Linner
279 ;Linner
280 $LD $tj,$BNSZ($tp) ; tp[j]
281 addc $lo0,$alo,$hi0
282 addze $hi0,$ahi
283 addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
284 addze $hi0,$hi0
285
286 addc $lo1,$nlo,$hi1
287 addze $hi1,$nhi
288 addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
289 addze $hi1,$hi1
290 $ST $lo1,0($tp) ; tp[j-1]
291
292 addic $ovf,$ovf,-1 ; move topmost overflow to XER[CA]
293 li $ovf,0
294 adde $hi1,$hi1,$hi0
295 addze $ovf,$ovf
296 $ST $hi1,$BNSZ($tp)
297 ;
298 slwi $tj,$num,`log($BNSZ)/log(2)`
299 $UCMP $i,$tj
300 addi $i,$i,$BNSZ
301 ble Louter
302 \f
303 addi $num,$num,2 ; restore $num
304 subfc $j,$j,$j ; j=0 and "clear" XER[CA]
305 addi $tp,$sp,$LOCALS
306 mtctr $num
307
308 .align 4
309 Lsub: $LDX $tj,$tp,$j
310 $LDX $nj,$np,$j
311 subfe $aj,$nj,$tj ; tp[j]-np[j]
312 $STX $aj,$rp,$j
313 addi $j,$j,$BNSZ
314 bdnz Lsub
315
316 li $j,0
317 mtctr $num
318 subfe $ovf,$j,$ovf ; handle topmost overflow bit
319 and $ap,$tp,$ovf
320 andc $np,$rp,$ovf
321 or $ap,$ap,$np ; ap=borrow?tp:rp
322
323 .align 4
324 Lcopy: ; copy or in-place refresh
325 $LDX $tj,$ap,$j
326 $STX $tj,$rp,$j
327 $STX $j,$tp,$j ; zap at once
328 addi $j,$j,$BNSZ
329 bdnz Lcopy
330
331 $POP $tj,0($sp)
332 li r3,1
333 $POP r20,`-12*$SIZE_T`($tj)
334 $POP r21,`-11*$SIZE_T`($tj)
335 $POP r22,`-10*$SIZE_T`($tj)
336 $POP r23,`-9*$SIZE_T`($tj)
337 $POP r24,`-8*$SIZE_T`($tj)
338 $POP r25,`-7*$SIZE_T`($tj)
339 $POP r26,`-6*$SIZE_T`($tj)
340 $POP r27,`-5*$SIZE_T`($tj)
341 $POP r28,`-4*$SIZE_T`($tj)
342 $POP r29,`-3*$SIZE_T`($tj)
343 $POP r30,`-2*$SIZE_T`($tj)
344 $POP r31,`-1*$SIZE_T`($tj)
345 mr $sp,$tj
346 blr
347 .long 0
348 .byte 0,12,4,0,0x80,12,6,0
349 .long 0
350 .size .bn_mul_mont_int,.-.bn_mul_mont_int
351 ___
352 }
353 if (1) {
354 my ($a0,$a1,$a2,$a3,
355 $t0,$t1,$t2,$t3,
356 $m0,$m1,$m2,$m3,
357 $acc0,$acc1,$acc2,$acc3,$acc4,
358 $bi,$mi,$tp,$ap_end,$cnt) = map("r$_",(9..12,14..31));
359 my ($carry,$zero) = ($rp,"r0");
360
361 # sp----------->+-------------------------------+
362 # | saved sp |
363 # +-------------------------------+
364 # . .
365 # +8*size_t +-------------------------------+
366 # | 4 "n0*t0" |
367 # . .
368 # . .
369 # +12*size_t +-------------------------------+
370 # | size_t tmp[num] |
371 # . .
372 # . .
373 # . .
374 # +-------------------------------+
375 # | topmost carry |
376 # . .
377 # -18*size_t +-------------------------------+
378 # | 18 saved gpr, r14-r31 |
379 # . .
380 # . .
381 # +-------------------------------+
382 $code.=<<___;
383 .globl .bn_mul4x_mont_int
384 .align 5
385 .bn_mul4x_mont_int:
386 andi. r0,$num,7
387 bne .Lmul4x_do
388 $UCMP $ap,$bp
389 bne .Lmul4x_do
390 b .Lsqr8x_do
391 .Lmul4x_do:
392 slwi $num,$num,`log($SIZE_T)/log(2)`
393 mr $a0,$sp
394 li $a1,-32*$SIZE_T
395 sub $a1,$a1,$num
396 $STUX $sp,$sp,$a1 # alloca
397
398 $PUSH r14,-$SIZE_T*18($a0)
399 $PUSH r15,-$SIZE_T*17($a0)
400 $PUSH r16,-$SIZE_T*16($a0)
401 $PUSH r17,-$SIZE_T*15($a0)
402 $PUSH r18,-$SIZE_T*14($a0)
403 $PUSH r19,-$SIZE_T*13($a0)
404 $PUSH r20,-$SIZE_T*12($a0)
405 $PUSH r21,-$SIZE_T*11($a0)
406 $PUSH r22,-$SIZE_T*10($a0)
407 $PUSH r23,-$SIZE_T*9($a0)
408 $PUSH r24,-$SIZE_T*8($a0)
409 $PUSH r25,-$SIZE_T*7($a0)
410 $PUSH r26,-$SIZE_T*6($a0)
411 $PUSH r27,-$SIZE_T*5($a0)
412 $PUSH r28,-$SIZE_T*4($a0)
413 $PUSH r29,-$SIZE_T*3($a0)
414 $PUSH r30,-$SIZE_T*2($a0)
415 $PUSH r31,-$SIZE_T*1($a0)
416
417 subi $ap,$ap,$SIZE_T # bias by -1
418 subi $np,$np,$SIZE_T # bias by -1
419 subi $rp,$rp,$SIZE_T # bias by -1
420 $LD $n0,0($n0) # *n0
421
422 add $t0,$bp,$num
423 add $ap_end,$ap,$num
424 subi $t0,$t0,$SIZE_T*4 # &b[num-4]
425
426 $LD $bi,$SIZE_T*0($bp) # b[0]
427 li $acc0,0
428 $LD $a0,$SIZE_T*1($ap) # a[0..3]
429 li $acc1,0
430 $LD $a1,$SIZE_T*2($ap)
431 li $acc2,0
432 $LD $a2,$SIZE_T*3($ap)
433 li $acc3,0
434 $LDU $a3,$SIZE_T*4($ap)
435 $LD $m0,$SIZE_T*1($np) # n[0..3]
436 $LD $m1,$SIZE_T*2($np)
437 $LD $m2,$SIZE_T*3($np)
438 $LDU $m3,$SIZE_T*4($np)
439
440 $PUSH $rp,$SIZE_T*6($sp) # offload rp and &b[num-4]
441 $PUSH $t0,$SIZE_T*7($sp)
442 li $carry,0
443 addic $tp,$sp,$SIZE_T*7 # &t[-1], clear carry bit
444 li $cnt,0
445 li $zero,0
446 b .Loop_mul4x_1st_reduction
447
448 .align 5
449 .Loop_mul4x_1st_reduction:
450 $UMULL $t0,$a0,$bi # lo(a[0..3]*b[0])
451 addze $carry,$carry # modulo-scheduled
452 $UMULL $t1,$a1,$bi
453 addi $cnt,$cnt,$SIZE_T
454 $UMULL $t2,$a2,$bi
455 andi. $cnt,$cnt,$SIZE_T*4-1
456 $UMULL $t3,$a3,$bi
457 addc $acc0,$acc0,$t0
458 $UMULH $t0,$a0,$bi # hi(a[0..3]*b[0])
459 adde $acc1,$acc1,$t1
460 $UMULH $t1,$a1,$bi
461 adde $acc2,$acc2,$t2
462 $UMULL $mi,$acc0,$n0 # t[0]*n0
463 adde $acc3,$acc3,$t3
464 $UMULH $t2,$a2,$bi
465 addze $acc4,$zero
466 $UMULH $t3,$a3,$bi
467 $LDX $bi,$bp,$cnt # next b[i] (or b[0])
468 addc $acc1,$acc1,$t0
469 # (*) mul $t0,$m0,$mi # lo(n[0..3]*t[0]*n0)
470 $STU $mi,$SIZE_T($tp) # put aside t[0]*n0 for tail processing
471 adde $acc2,$acc2,$t1
472 $UMULL $t1,$m1,$mi
473 adde $acc3,$acc3,$t2
474 $UMULL $t2,$m2,$mi
475 adde $acc4,$acc4,$t3 # can't overflow
476 $UMULL $t3,$m3,$mi
477 # (*) addc $acc0,$acc0,$t0
478 # (*) On the removal of the first multiplication and addition
479 # instructions. The outcome of the first addition is
480 # guaranteed to be zero, which leaves two computationally
481 # significant outcomes: it either carries or it doesn't. The
482 # question is when does it carry, and is there an alternative
483 # way to deduce it? If you follow the operations, you can
484 # observe that the condition for carry is quite simple:
485 # $acc0 being non-zero. So the carry can be calculated
486 # by adding -1 to $acc0. That's what the next instruction does.
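#	(Arithmetic check: with $BITS-bit words, $acc0 + (2^$BITS - 1)
#	reaches 2^$BITS, i.e. carries, exactly when $acc0 >= 1, while
#	for $acc0 == 0 the sum is 2^$BITS - 1 and no carry occurs.)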
487 addic $acc0,$acc0,-1 # (*), discarded
488 $UMULH $t0,$m0,$mi # hi(n[0..3]*t[0]*n0)
489 adde $acc0,$acc1,$t1
490 $UMULH $t1,$m1,$mi
491 adde $acc1,$acc2,$t2
492 $UMULH $t2,$m2,$mi
493 adde $acc2,$acc3,$t3
494 $UMULH $t3,$m3,$mi
495 adde $acc3,$acc4,$carry
496 addze $carry,$zero
497 addc $acc0,$acc0,$t0
498 adde $acc1,$acc1,$t1
499 adde $acc2,$acc2,$t2
500 adde $acc3,$acc3,$t3
501 #addze $carry,$carry
502 bne .Loop_mul4x_1st_reduction
503
504 $UCMP $ap_end,$ap
505 beq .Lmul4x4_post_condition
506
507 $LD $a0,$SIZE_T*1($ap) # a[4..7]
508 $LD $a1,$SIZE_T*2($ap)
509 $LD $a2,$SIZE_T*3($ap)
510 $LDU $a3,$SIZE_T*4($ap)
511 $LD $mi,$SIZE_T*8($sp) # a[0]*n0
512 $LD $m0,$SIZE_T*1($np) # n[4..7]
513 $LD $m1,$SIZE_T*2($np)
514 $LD $m2,$SIZE_T*3($np)
515 $LDU $m3,$SIZE_T*4($np)
516 b .Loop_mul4x_1st_tail
517
518 .align 5
519 .Loop_mul4x_1st_tail:
520 $UMULL $t0,$a0,$bi # lo(a[4..7]*b[i])
521 addze $carry,$carry # modulo-scheduled
522 $UMULL $t1,$a1,$bi
523 addi $cnt,$cnt,$SIZE_T
524 $UMULL $t2,$a2,$bi
525 andi. $cnt,$cnt,$SIZE_T*4-1
526 $UMULL $t3,$a3,$bi
527 addc $acc0,$acc0,$t0
528 $UMULH $t0,$a0,$bi # hi(a[4..7]*b[i])
529 adde $acc1,$acc1,$t1
530 $UMULH $t1,$a1,$bi
531 adde $acc2,$acc2,$t2
532 $UMULH $t2,$a2,$bi
533 adde $acc3,$acc3,$t3
534 $UMULH $t3,$a3,$bi
535 addze $acc4,$zero
536 $LDX $bi,$bp,$cnt # next b[i] (or b[0])
537 addc $acc1,$acc1,$t0
538 $UMULL $t0,$m0,$mi # lo(n[4..7]*a[0]*n0)
539 adde $acc2,$acc2,$t1
540 $UMULL $t1,$m1,$mi
541 adde $acc3,$acc3,$t2
542 $UMULL $t2,$m2,$mi
543 adde $acc4,$acc4,$t3 # can't overflow
544 $UMULL $t3,$m3,$mi
545 addc $acc0,$acc0,$t0
546 $UMULH $t0,$m0,$mi # hi(n[4..7]*a[0]*n0)
547 adde $acc1,$acc1,$t1
548 $UMULH $t1,$m1,$mi
549 adde $acc2,$acc2,$t2
550 $UMULH $t2,$m2,$mi
551 adde $acc3,$acc3,$t3
552 adde $acc4,$acc4,$carry
553 $UMULH $t3,$m3,$mi
554 addze $carry,$zero
555 addi $mi,$sp,$SIZE_T*8
556 $LDX $mi,$mi,$cnt # next t[0]*n0
557 $STU $acc0,$SIZE_T($tp) # word of result
558 addc $acc0,$acc1,$t0
559 adde $acc1,$acc2,$t1
560 adde $acc2,$acc3,$t2
561 adde $acc3,$acc4,$t3
562 #addze $carry,$carry
563 bne .Loop_mul4x_1st_tail
564
565 sub $t1,$ap_end,$num # rewound $ap
566 $UCMP $ap_end,$ap # done yet?
567 beq .Lmul4x_proceed
568
569 $LD $a0,$SIZE_T*1($ap)
570 $LD $a1,$SIZE_T*2($ap)
571 $LD $a2,$SIZE_T*3($ap)
572 $LDU $a3,$SIZE_T*4($ap)
573 $LD $m0,$SIZE_T*1($np)
574 $LD $m1,$SIZE_T*2($np)
575 $LD $m2,$SIZE_T*3($np)
576 $LDU $m3,$SIZE_T*4($np)
577 b .Loop_mul4x_1st_tail
578
579 .align 5
580 .Lmul4x_proceed:
581 $LDU $bi,$SIZE_T*4($bp) # *++b
582 addze $carry,$carry # topmost carry
583 $LD $a0,$SIZE_T*1($t1)
584 $LD $a1,$SIZE_T*2($t1)
585 $LD $a2,$SIZE_T*3($t1)
586 $LD $a3,$SIZE_T*4($t1)
587 addi $ap,$t1,$SIZE_T*4
588 sub $np,$np,$num # rewind np
589
590 $ST $acc0,$SIZE_T*1($tp) # result
591 $ST $acc1,$SIZE_T*2($tp)
592 $ST $acc2,$SIZE_T*3($tp)
593 $ST $acc3,$SIZE_T*4($tp)
594 $ST $carry,$SIZE_T*5($tp) # save topmost carry
595 $LD $acc0,$SIZE_T*12($sp) # t[0..3]
596 $LD $acc1,$SIZE_T*13($sp)
597 $LD $acc2,$SIZE_T*14($sp)
598 $LD $acc3,$SIZE_T*15($sp)
599
600 $LD $m0,$SIZE_T*1($np) # n[0..3]
601 $LD $m1,$SIZE_T*2($np)
602 $LD $m2,$SIZE_T*3($np)
603 $LDU $m3,$SIZE_T*4($np)
604 addic $tp,$sp,$SIZE_T*7 # &t[-1], clear carry bit
605 li $carry,0
606 b .Loop_mul4x_reduction
607
608 .align 5
609 .Loop_mul4x_reduction:
610 $UMULL $t0,$a0,$bi # lo(a[0..3]*b[4])
611 addze $carry,$carry # modulo-scheduled
612 $UMULL $t1,$a1,$bi
613 addi $cnt,$cnt,$SIZE_T
614 $UMULL $t2,$a2,$bi
615 andi. $cnt,$cnt,$SIZE_T*4-1
616 $UMULL $t3,$a3,$bi
617 addc $acc0,$acc0,$t0
618 $UMULH $t0,$a0,$bi # hi(a[0..3]*b[4])
619 adde $acc1,$acc1,$t1
620 $UMULH $t1,$a1,$bi
621 adde $acc2,$acc2,$t2
622 $UMULL $mi,$acc0,$n0 # t[0]*n0
623 adde $acc3,$acc3,$t3
624 $UMULH $t2,$a2,$bi
625 addze $acc4,$zero
626 $UMULH $t3,$a3,$bi
627 $LDX $bi,$bp,$cnt # next b[i]
628 addc $acc1,$acc1,$t0
629 # (*) mul $t0,$m0,$mi
630 $STU $mi,$SIZE_T($tp) # put aside t[0]*n0 for tail processing
631 adde $acc2,$acc2,$t1
632 $UMULL $t1,$m1,$mi # lo(n[0..3]*t[0]*n0)
633 adde $acc3,$acc3,$t2
634 $UMULL $t2,$m2,$mi
635 adde $acc4,$acc4,$t3 # can't overflow
636 $UMULL $t3,$m3,$mi
637 # (*) addc $acc0,$acc0,$t0
638 addic $acc0,$acc0,-1 # (*), discarded
639 $UMULH $t0,$m0,$mi # hi(n[0..3]*t[0]*n0)
640 adde $acc0,$acc1,$t1
641 $UMULH $t1,$m1,$mi
642 adde $acc1,$acc2,$t2
643 $UMULH $t2,$m2,$mi
644 adde $acc2,$acc3,$t3
645 $UMULH $t3,$m3,$mi
646 adde $acc3,$acc4,$carry
647 addze $carry,$zero
648 addc $acc0,$acc0,$t0
649 adde $acc1,$acc1,$t1
650 adde $acc2,$acc2,$t2
651 adde $acc3,$acc3,$t3
652 #addze $carry,$carry
653 bne .Loop_mul4x_reduction
654
655 $LD $t0,$SIZE_T*5($tp) # t[4..7]
656 addze $carry,$carry
657 $LD $t1,$SIZE_T*6($tp)
658 $LD $t2,$SIZE_T*7($tp)
659 $LD $t3,$SIZE_T*8($tp)
660 $LD $a0,$SIZE_T*1($ap) # a[4..7]
661 $LD $a1,$SIZE_T*2($ap)
662 $LD $a2,$SIZE_T*3($ap)
663 $LDU $a3,$SIZE_T*4($ap)
664 addc $acc0,$acc0,$t0
665 adde $acc1,$acc1,$t1
666 adde $acc2,$acc2,$t2
667 adde $acc3,$acc3,$t3
668 #addze $carry,$carry
669
670 $LD $mi,$SIZE_T*8($sp) # t[0]*n0
671 $LD $m0,$SIZE_T*1($np) # n[4..7]
672 $LD $m1,$SIZE_T*2($np)
673 $LD $m2,$SIZE_T*3($np)
674 $LDU $m3,$SIZE_T*4($np)
675 b .Loop_mul4x_tail
676
677 .align 5
678 .Loop_mul4x_tail:
679 $UMULL $t0,$a0,$bi # lo(a[4..7]*b[4])
680 addze $carry,$carry # modulo-scheduled
681 $UMULL $t1,$a1,$bi
682 addi $cnt,$cnt,$SIZE_T
683 $UMULL $t2,$a2,$bi
684 andi. $cnt,$cnt,$SIZE_T*4-1
685 $UMULL $t3,$a3,$bi
686 addc $acc0,$acc0,$t0
687 $UMULH $t0,$a0,$bi # hi(a[4..7]*b[4])
688 adde $acc1,$acc1,$t1
689 $UMULH $t1,$a1,$bi
690 adde $acc2,$acc2,$t2
691 $UMULH $t2,$a2,$bi
692 adde $acc3,$acc3,$t3
693 $UMULH $t3,$a3,$bi
694 addze $acc4,$zero
695 $LDX $bi,$bp,$cnt # next b[i]
696 addc $acc1,$acc1,$t0
697 $UMULL $t0,$m0,$mi # lo(n[4..7]*t[0]*n0)
698 adde $acc2,$acc2,$t1
699 $UMULL $t1,$m1,$mi
700 adde $acc3,$acc3,$t2
701 $UMULL $t2,$m2,$mi
702 adde $acc4,$acc4,$t3 # can't overflow
703 $UMULL $t3,$m3,$mi
704 addc $acc0,$acc0,$t0
705 $UMULH $t0,$m0,$mi # hi(n[4..7]*t[0]*n0)
706 adde $acc1,$acc1,$t1
707 $UMULH $t1,$m1,$mi
708 adde $acc2,$acc2,$t2
709 $UMULH $t2,$m2,$mi
710 adde $acc3,$acc3,$t3
711 $UMULH $t3,$m3,$mi
712 adde $acc4,$acc4,$carry
713 addi $mi,$sp,$SIZE_T*8
714 $LDX $mi,$mi,$cnt # next a[0]*n0
715 addze $carry,$zero
716 $STU $acc0,$SIZE_T($tp) # word of result
717 addc $acc0,$acc1,$t0
718 adde $acc1,$acc2,$t1
719 adde $acc2,$acc3,$t2
720 adde $acc3,$acc4,$t3
721 #addze $carry,$carry
722 bne .Loop_mul4x_tail
723
724 $LD $t0,$SIZE_T*5($tp) # next t[i] or topmost carry
725 sub $t1,$np,$num # rewound np?
726 addze $carry,$carry
727 $UCMP $ap_end,$ap # done yet?
728 beq .Loop_mul4x_break
729
730 $LD $t1,$SIZE_T*6($tp)
731 $LD $t2,$SIZE_T*7($tp)
732 $LD $t3,$SIZE_T*8($tp)
733 $LD $a0,$SIZE_T*1($ap)
734 $LD $a1,$SIZE_T*2($ap)
735 $LD $a2,$SIZE_T*3($ap)
736 $LDU $a3,$SIZE_T*4($ap)
737 addc $acc0,$acc0,$t0
738 adde $acc1,$acc1,$t1
739 adde $acc2,$acc2,$t2
740 adde $acc3,$acc3,$t3
741 #addze $carry,$carry
742
743 $LD $m0,$SIZE_T*1($np) # n[4..7]
744 $LD $m1,$SIZE_T*2($np)
745 $LD $m2,$SIZE_T*3($np)
746 $LDU $m3,$SIZE_T*4($np)
747 b .Loop_mul4x_tail
748
749 .align 5
750 .Loop_mul4x_break:
751 $POP $t2,$SIZE_T*6($sp) # pull rp and &b[num-4]
752 $POP $t3,$SIZE_T*7($sp)
753 addc $a0,$acc0,$t0 # accumulate topmost carry
754 $LD $acc0,$SIZE_T*12($sp) # t[0..3]
755 addze $a1,$acc1
756 $LD $acc1,$SIZE_T*13($sp)
757 addze $a2,$acc2
758 $LD $acc2,$SIZE_T*14($sp)
759 addze $a3,$acc3
760 $LD $acc3,$SIZE_T*15($sp)
761 addze $carry,$carry # topmost carry
762 $ST $a0,$SIZE_T*1($tp) # result
763 sub $ap,$ap_end,$num # rewind ap
764 $ST $a1,$SIZE_T*2($tp)
765 $ST $a2,$SIZE_T*3($tp)
766 $ST $a3,$SIZE_T*4($tp)
767 $ST $carry,$SIZE_T*5($tp) # store topmost carry
768
769 $LD $m0,$SIZE_T*1($t1) # n[0..3]
770 $LD $m1,$SIZE_T*2($t1)
771 $LD $m2,$SIZE_T*3($t1)
772 $LD $m3,$SIZE_T*4($t1)
773 addi $np,$t1,$SIZE_T*4
774 $UCMP $bp,$t3 # done yet?
775 beq .Lmul4x_post
776
777 $LDU $bi,$SIZE_T*4($bp)
778 $LD $a0,$SIZE_T*1($ap) # a[0..3]
779 $LD $a1,$SIZE_T*2($ap)
780 $LD $a2,$SIZE_T*3($ap)
781 $LDU $a3,$SIZE_T*4($ap)
782 li $carry,0
783 addic $tp,$sp,$SIZE_T*7 # &t[-1], clear carry bit
784 b .Loop_mul4x_reduction
785
786 .align 5
787 .Lmul4x_post:
788 # Final step. We see if the result is larger than the modulus, and
789 # if it is, subtract the modulus. But a comparison implies a
790 # subtraction, so we subtract the modulus, see if it borrowed,
791 # and conditionally copy the original value.
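#	In effect (illustrative C, not part of the generated code): with
#	mask = borrowed ? ~0 : 0, each word becomes
#	rp[j] = (tp[j] & mask) | ((tp[j] - np[j]) & ~mask),
#	which the and/andc/or sequence below computes branch-free.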
792 srwi $cnt,$num,`log($SIZE_T)/log(2)+2`
793 mr $bp,$t2 # &rp[-1]
794 subi $cnt,$cnt,1
795 mr $ap_end,$t2 # &rp[-1] copy
796 subfc $t0,$m0,$acc0
797 addi $tp,$sp,$SIZE_T*15
798 subfe $t1,$m1,$acc1
799
800 mtctr $cnt
801 .Lmul4x_sub:
802 $LD $m0,$SIZE_T*1($np)
803 $LD $acc0,$SIZE_T*1($tp)
804 subfe $t2,$m2,$acc2
805 $LD $m1,$SIZE_T*2($np)
806 $LD $acc1,$SIZE_T*2($tp)
807 subfe $t3,$m3,$acc3
808 $LD $m2,$SIZE_T*3($np)
809 $LD $acc2,$SIZE_T*3($tp)
810 $LDU $m3,$SIZE_T*4($np)
811 $LDU $acc3,$SIZE_T*4($tp)
812 $ST $t0,$SIZE_T*1($bp)
813 $ST $t1,$SIZE_T*2($bp)
814 subfe $t0,$m0,$acc0
815 $ST $t2,$SIZE_T*3($bp)
816 $STU $t3,$SIZE_T*4($bp)
817 subfe $t1,$m1,$acc1
818 bdnz .Lmul4x_sub
819
820 $LD $a0,$SIZE_T*1($ap_end)
821 $ST $t0,$SIZE_T*1($bp)
822 $LD $t0,$SIZE_T*12($sp)
823 subfe $t2,$m2,$acc2
824 $LD $a1,$SIZE_T*2($ap_end)
825 $ST $t1,$SIZE_T*2($bp)
826 $LD $t1,$SIZE_T*13($sp)
827 subfe $t3,$m3,$acc3
828 subfe $carry,$zero,$carry # did it borrow?
829 addi $tp,$sp,$SIZE_T*12
830 $LD $a2,$SIZE_T*3($ap_end)
831 $ST $t2,$SIZE_T*3($bp)
832 $LD $t2,$SIZE_T*14($sp)
833 $LD $a3,$SIZE_T*4($ap_end)
834 $ST $t3,$SIZE_T*4($bp)
835 $LD $t3,$SIZE_T*15($sp)
836
837 mtctr $cnt
838 .Lmul4x_cond_copy:
839 and $t0,$t0,$carry
840 andc $a0,$a0,$carry
841 $ST $zero,$SIZE_T*0($tp) # wipe stack clean
842 and $t1,$t1,$carry
843 andc $a1,$a1,$carry
844 $ST $zero,$SIZE_T*1($tp)
845 and $t2,$t2,$carry
846 andc $a2,$a2,$carry
847 $ST $zero,$SIZE_T*2($tp)
848 and $t3,$t3,$carry
849 andc $a3,$a3,$carry
850 $ST $zero,$SIZE_T*3($tp)
851 or $acc0,$t0,$a0
852 $LD $a0,$SIZE_T*5($ap_end)
853 $LD $t0,$SIZE_T*4($tp)
854 or $acc1,$t1,$a1
855 $LD $a1,$SIZE_T*6($ap_end)
856 $LD $t1,$SIZE_T*5($tp)
857 or $acc2,$t2,$a2
858 $LD $a2,$SIZE_T*7($ap_end)
859 $LD $t2,$SIZE_T*6($tp)
860 or $acc3,$t3,$a3
861 $LD $a3,$SIZE_T*8($ap_end)
862 $LD $t3,$SIZE_T*7($tp)
863 addi $tp,$tp,$SIZE_T*4
864 $ST $acc0,$SIZE_T*1($ap_end)
865 $ST $acc1,$SIZE_T*2($ap_end)
866 $ST $acc2,$SIZE_T*3($ap_end)
867 $STU $acc3,$SIZE_T*4($ap_end)
868 bdnz .Lmul4x_cond_copy
869
870 $POP $bp,0($sp) # pull saved sp
871 and $t0,$t0,$carry
872 andc $a0,$a0,$carry
873 $ST $zero,$SIZE_T*0($tp)
874 and $t1,$t1,$carry
875 andc $a1,$a1,$carry
876 $ST $zero,$SIZE_T*1($tp)
877 and $t2,$t2,$carry
878 andc $a2,$a2,$carry
879 $ST $zero,$SIZE_T*2($tp)
880 and $t3,$t3,$carry
881 andc $a3,$a3,$carry
882 $ST $zero,$SIZE_T*3($tp)
883 or $acc0,$t0,$a0
884 or $acc1,$t1,$a1
885 $ST $zero,$SIZE_T*4($tp)
886 or $acc2,$t2,$a2
887 or $acc3,$t3,$a3
888 $ST $acc0,$SIZE_T*1($ap_end)
889 $ST $acc1,$SIZE_T*2($ap_end)
890 $ST $acc2,$SIZE_T*3($ap_end)
891 $ST $acc3,$SIZE_T*4($ap_end)
892
893 b .Lmul4x_done
894
895 .align 4
896 .Lmul4x4_post_condition:
897 $POP $ap,$SIZE_T*6($sp) # pull &rp[-1]
898 $POP $bp,0($sp) # pull saved sp
899 addze $carry,$carry # modulo-scheduled
900 # $acc0-3,$carry hold result, $m0-3 hold modulus
901 subfc $a0,$m0,$acc0
902 subfe $a1,$m1,$acc1
903 subfe $a2,$m2,$acc2
904 subfe $a3,$m3,$acc3
905 subfe $carry,$zero,$carry # did it borrow?
906
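#	(Branch-free fixup: $carry is now an all-ones mask exactly when
#	the subtraction borrowed, so "modulus & mask" below adds the
#	modulus back only in that case.)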
907 and $m0,$m0,$carry
908 and $m1,$m1,$carry
909 addc $a0,$a0,$m0
910 and $m2,$m2,$carry
911 adde $a1,$a1,$m1
912 and $m3,$m3,$carry
913 adde $a2,$a2,$m2
914 adde $a3,$a3,$m3
915
916 $ST $a0,$SIZE_T*1($ap) # write result
917 $ST $a1,$SIZE_T*2($ap)
918 $ST $a2,$SIZE_T*3($ap)
919 $ST $a3,$SIZE_T*4($ap)
920
921 .Lmul4x_done:
922 $ST $zero,$SIZE_T*8($sp) # wipe stack clean
923 $ST $zero,$SIZE_T*9($sp)
924 $ST $zero,$SIZE_T*10($sp)
925 $ST $zero,$SIZE_T*11($sp)
926 li r3,1 # signal "done"
927 $POP r14,-$SIZE_T*18($bp)
928 $POP r15,-$SIZE_T*17($bp)
929 $POP r16,-$SIZE_T*16($bp)
930 $POP r17,-$SIZE_T*15($bp)
931 $POP r18,-$SIZE_T*14($bp)
932 $POP r19,-$SIZE_T*13($bp)
933 $POP r20,-$SIZE_T*12($bp)
934 $POP r21,-$SIZE_T*11($bp)
935 $POP r22,-$SIZE_T*10($bp)
936 $POP r23,-$SIZE_T*9($bp)
937 $POP r24,-$SIZE_T*8($bp)
938 $POP r25,-$SIZE_T*7($bp)
939 $POP r26,-$SIZE_T*6($bp)
940 $POP r27,-$SIZE_T*5($bp)
941 $POP r28,-$SIZE_T*4($bp)
942 $POP r29,-$SIZE_T*3($bp)
943 $POP r30,-$SIZE_T*2($bp)
944 $POP r31,-$SIZE_T*1($bp)
945 mr $sp,$bp
946 blr
947 .long 0
948 .byte 0,12,4,0x20,0x80,18,6,0
949 .long 0
950 .size .bn_mul4x_mont_int,.-.bn_mul4x_mont_int
951 ___
952 }
953
954 if (1) {
955 ########################################################################
956 # The following is a PPC adaptation of sqrx8x_mont from the x86_64-mont5 module.
957
958 my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("r$_",(9..12,14..17));
959 my ($t0,$t1,$t2,$t3)=map("r$_",(18..21));
960 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("r$_",(22..29));
961 my ($cnt,$carry,$zero)=("r30","r31","r0");
962 my ($tp,$ap_end,$na0)=($bp,$np,$carry);
963
964 # sp----------->+-------------------------------+
965 # | saved sp |
966 # +-------------------------------+
967 # . .
968 # +12*size_t +-------------------------------+
969 # | size_t tmp[2*num] |
970 # . .
971 # . .
972 # . .
973 # +-------------------------------+
974 # . .
975 # -18*size_t +-------------------------------+
976 # | 18 saved gpr, r14-r31 |
977 # . .
978 # . .
979 # +-------------------------------+
980 $code.=<<___;
981 .align 5
982 __bn_sqr8x_mont:
983 .Lsqr8x_do:
984 mr $a0,$sp
985 slwi $a1,$num,`log($SIZE_T)/log(2)+1`
986 li $a2,-32*$SIZE_T
987 sub $a1,$a2,$a1
988 slwi $num,$num,`log($SIZE_T)/log(2)`
989 $STUX $sp,$sp,$a1 # alloca
990
991 $PUSH r14,-$SIZE_T*18($a0)
992 $PUSH r15,-$SIZE_T*17($a0)
993 $PUSH r16,-$SIZE_T*16($a0)
994 $PUSH r17,-$SIZE_T*15($a0)
995 $PUSH r18,-$SIZE_T*14($a0)
996 $PUSH r19,-$SIZE_T*13($a0)
997 $PUSH r20,-$SIZE_T*12($a0)
998 $PUSH r21,-$SIZE_T*11($a0)
999 $PUSH r22,-$SIZE_T*10($a0)
1000 $PUSH r23,-$SIZE_T*9($a0)
1001 $PUSH r24,-$SIZE_T*8($a0)
1002 $PUSH r25,-$SIZE_T*7($a0)
1003 $PUSH r26,-$SIZE_T*6($a0)
1004 $PUSH r27,-$SIZE_T*5($a0)
1005 $PUSH r28,-$SIZE_T*4($a0)
1006 $PUSH r29,-$SIZE_T*3($a0)
1007 $PUSH r30,-$SIZE_T*2($a0)
1008 $PUSH r31,-$SIZE_T*1($a0)
1009
1010 subi $ap,$ap,$SIZE_T # bias by -1
1011 subi $t0,$np,$SIZE_T # bias by -1
1012 subi $rp,$rp,$SIZE_T # bias by -1
1013 $LD $n0,0($n0) # *n0
1014 li $zero,0
1015
1016 add $ap_end,$ap,$num
1017 $LD $a0,$SIZE_T*1($ap)
1018 #li $acc0,0
1019 $LD $a1,$SIZE_T*2($ap)
1020 li $acc1,0
1021 $LD $a2,$SIZE_T*3($ap)
1022 li $acc2,0
1023 $LD $a3,$SIZE_T*4($ap)
1024 li $acc3,0
1025 $LD $a4,$SIZE_T*5($ap)
1026 li $acc4,0
1027 $LD $a5,$SIZE_T*6($ap)
1028 li $acc5,0
1029 $LD $a6,$SIZE_T*7($ap)
1030 li $acc6,0
1031 $LDU $a7,$SIZE_T*8($ap)
1032 li $acc7,0
1033
1034 addi $tp,$sp,$SIZE_T*11 # &tp[-1]
1035 subic. $cnt,$num,$SIZE_T*8
1036 b .Lsqr8x_zero_start
1037
1038 .align 5
1039 .Lsqr8x_zero:
1040 subic. $cnt,$cnt,$SIZE_T*8
1041 $ST $zero,$SIZE_T*1($tp)
1042 $ST $zero,$SIZE_T*2($tp)
1043 $ST $zero,$SIZE_T*3($tp)
1044 $ST $zero,$SIZE_T*4($tp)
1045 $ST $zero,$SIZE_T*5($tp)
1046 $ST $zero,$SIZE_T*6($tp)
1047 $ST $zero,$SIZE_T*7($tp)
1048 $ST $zero,$SIZE_T*8($tp)
1049 .Lsqr8x_zero_start:
1050 $ST $zero,$SIZE_T*9($tp)
1051 $ST $zero,$SIZE_T*10($tp)
1052 $ST $zero,$SIZE_T*11($tp)
1053 $ST $zero,$SIZE_T*12($tp)
1054 $ST $zero,$SIZE_T*13($tp)
1055 $ST $zero,$SIZE_T*14($tp)
1056 $ST $zero,$SIZE_T*15($tp)
1057 $STU $zero,$SIZE_T*16($tp)
1058 bne .Lsqr8x_zero
1059
1060 $PUSH $rp,$SIZE_T*6($sp) # offload &rp[-1]
1061 $PUSH $t0,$SIZE_T*7($sp) # offload &np[-1]
1062 $PUSH $n0,$SIZE_T*8($sp) # offload n0
1063 $PUSH $tp,$SIZE_T*9($sp) # &tp[2*num-1]
1064 $PUSH $zero,$SIZE_T*10($sp) # initial top-most carry
1065 addi $tp,$sp,$SIZE_T*11 # &tp[-1]
1066
1067 # Multiply everything but a[i]*a[i]
1068 .align 5
1069 .Lsqr8x_outer_loop:
1070 # a[1]a[0] (i)
1071 # a[2]a[0]
1072 # a[3]a[0]
1073 # a[4]a[0]
1074 # a[5]a[0]
1075 # a[6]a[0]
1076 # a[7]a[0]
1077 # a[2]a[1] (ii)
1078 # a[3]a[1]
1079 # a[4]a[1]
1080 # a[5]a[1]
1081 # a[6]a[1]
1082 # a[7]a[1]
1083 # a[3]a[2] (iii)
1084 # a[4]a[2]
1085 # a[5]a[2]
1086 # a[6]a[2]
1087 # a[7]a[2]
1088 # a[4]a[3] (iv)
1089 # a[5]a[3]
1090 # a[6]a[3]
1091 # a[7]a[3]
1092 # a[5]a[4] (v)
1093 # a[6]a[4]
1094 # a[7]a[4]
1095 # a[6]a[5] (vi)
1096 # a[7]a[5]
1097 # a[7]a[6] (vii)
1098
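#	Only the cross products a[i]*a[j] with i>j are accumulated in
#	this pass; they are doubled later (.Lsqr4x_shift_n_add) and the
#	squares a[i]*a[i] added in, per the identity
#	(sum a_i*2^(w*i))^2 = sum a_i^2*2^(2*w*i)
#	                    + 2*sum_{i>j} a_i*a_j*2^(w*(i+j)).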
1099 $UMULL $t0,$a1,$a0 # lo(a[1..7]*a[0]) (i)
1100 $UMULL $t1,$a2,$a0
1101 $UMULL $t2,$a3,$a0
1102 $UMULL $t3,$a4,$a0
1103 addc $acc1,$acc1,$t0 # t[1]+lo(a[1]*a[0])
1104 $UMULL $t0,$a5,$a0
1105 adde $acc2,$acc2,$t1
1106 $UMULL $t1,$a6,$a0
1107 adde $acc3,$acc3,$t2
1108 $UMULL $t2,$a7,$a0
1109 adde $acc4,$acc4,$t3
1110 $UMULH $t3,$a1,$a0 # hi(a[1..7]*a[0])
1111 adde $acc5,$acc5,$t0
1112 $UMULH $t0,$a2,$a0
1113 adde $acc6,$acc6,$t1
1114 $UMULH $t1,$a3,$a0
1115 adde $acc7,$acc7,$t2
1116 $UMULH $t2,$a4,$a0
1117 $ST $acc0,$SIZE_T*1($tp) # t[0]
1118 addze $acc0,$zero # t[8]
1119 $ST $acc1,$SIZE_T*2($tp) # t[1]
1120 addc $acc2,$acc2,$t3 # t[2]+lo(a[1]*a[0])
1121 $UMULH $t3,$a5,$a0
1122 adde $acc3,$acc3,$t0
1123 $UMULH $t0,$a6,$a0
1124 adde $acc4,$acc4,$t1
1125 $UMULH $t1,$a7,$a0
1126 adde $acc5,$acc5,$t2
1127 $UMULL $t2,$a2,$a1 # lo(a[2..7]*a[1]) (ii)
1128 adde $acc6,$acc6,$t3
1129 $UMULL $t3,$a3,$a1
1130 adde $acc7,$acc7,$t0
1131 $UMULL $t0,$a4,$a1
1132 adde $acc0,$acc0,$t1
1133
1134 $UMULL $t1,$a5,$a1
1135 addc $acc3,$acc3,$t2
1136 $UMULL $t2,$a6,$a1
1137 adde $acc4,$acc4,$t3
1138 $UMULL $t3,$a7,$a1
1139 adde $acc5,$acc5,$t0
1140 $UMULH $t0,$a2,$a1 # hi(a[2..7]*a[1])
1141 adde $acc6,$acc6,$t1
1142 $UMULH $t1,$a3,$a1
1143 adde $acc7,$acc7,$t2
1144 $UMULH $t2,$a4,$a1
1145 adde $acc0,$acc0,$t3
1146 $UMULH $t3,$a5,$a1
1147 $ST $acc2,$SIZE_T*3($tp) # t[2]
1148 addze $acc1,$zero # t[9]
1149 $ST $acc3,$SIZE_T*4($tp) # t[3]
1150 addc $acc4,$acc4,$t0
1151 $UMULH $t0,$a6,$a1
1152 adde $acc5,$acc5,$t1
1153 $UMULH $t1,$a7,$a1
1154 adde $acc6,$acc6,$t2
1155 $UMULL $t2,$a3,$a2 # lo(a[3..7]*a[2]) (iii)
1156 adde $acc7,$acc7,$t3
1157 $UMULL $t3,$a4,$a2
1158 adde $acc0,$acc0,$t0
1159 $UMULL $t0,$a5,$a2
1160 adde $acc1,$acc1,$t1
1161
1162 $UMULL $t1,$a6,$a2
1163 addc $acc5,$acc5,$t2
1164 $UMULL $t2,$a7,$a2
1165 adde $acc6,$acc6,$t3
1166 $UMULH $t3,$a3,$a2 # hi(a[3..7]*a[2])
1167 adde $acc7,$acc7,$t0
1168 $UMULH $t0,$a4,$a2
1169 adde $acc0,$acc0,$t1
1170 $UMULH $t1,$a5,$a2
1171 adde $acc1,$acc1,$t2
1172 $UMULH $t2,$a6,$a2
1173 $ST $acc4,$SIZE_T*5($tp) # t[4]
1174 addze $acc2,$zero # t[10]
1175 $ST $acc5,$SIZE_T*6($tp) # t[5]
1176 addc $acc6,$acc6,$t3
1177 $UMULH $t3,$a7,$a2
1178 adde $acc7,$acc7,$t0
1179 $UMULL $t0,$a4,$a3 # lo(a[4..7]*a[3]) (iv)
1180 adde $acc0,$acc0,$t1
1181 $UMULL $t1,$a5,$a3
1182 adde $acc1,$acc1,$t2
1183 $UMULL $t2,$a6,$a3
1184 adde $acc2,$acc2,$t3
1185
1186 $UMULL $t3,$a7,$a3
1187 addc $acc7,$acc7,$t0
1188 $UMULH $t0,$a4,$a3 # hi(a[4..7]*a[3])
1189 adde $acc0,$acc0,$t1
1190 $UMULH $t1,$a5,$a3
1191 adde $acc1,$acc1,$t2
1192 $UMULH $t2,$a6,$a3
1193 adde $acc2,$acc2,$t3
1194 $UMULH $t3,$a7,$a3
1195 $ST $acc6,$SIZE_T*7($tp) # t[6]
1196 addze $acc3,$zero # t[11]
1197 $STU $acc7,$SIZE_T*8($tp) # t[7]
1198 addc $acc0,$acc0,$t0
1199 $UMULL $t0,$a5,$a4 # lo(a[5..7]*a[4]) (v)
1200 adde $acc1,$acc1,$t1
1201 $UMULL $t1,$a6,$a4
1202 adde $acc2,$acc2,$t2
1203 $UMULL $t2,$a7,$a4
1204 adde $acc3,$acc3,$t3
1205
1206 $UMULH $t3,$a5,$a4 # hi(a[5..7]*a[4])
1207 addc $acc1,$acc1,$t0
1208 $UMULH $t0,$a6,$a4
1209 adde $acc2,$acc2,$t1
1210 $UMULH $t1,$a7,$a4
1211 adde $acc3,$acc3,$t2
1212 $UMULL $t2,$a6,$a5 # lo(a[6..7]*a[5]) (vi)
1213 addze $acc4,$zero # t[12]
1214 addc $acc2,$acc2,$t3
1215 $UMULL $t3,$a7,$a5
1216 adde $acc3,$acc3,$t0
1217 $UMULH $t0,$a6,$a5 # hi(a[6..7]*a[5])
1218 adde $acc4,$acc4,$t1
1219
1220 $UMULH $t1,$a7,$a5
1221 addc $acc3,$acc3,$t2
1222 $UMULL $t2,$a7,$a6 # lo(a[7]*a[6]) (vii)
1223 adde $acc4,$acc4,$t3
1224 $UMULH $t3,$a7,$a6 # hi(a[7]*a[6])
1225 addze $acc5,$zero # t[13]
1226 addc $acc4,$acc4,$t0
1227 $UCMP $ap_end,$ap # done yet?
1228 adde $acc5,$acc5,$t1
1229
1230 addc $acc5,$acc5,$t2
1231 sub $t0,$ap_end,$num # rewound ap
1232 addze $acc6,$zero # t[14]
1233 add $acc6,$acc6,$t3
1234
1235 beq .Lsqr8x_outer_break
1236
1237 mr $n0,$a0
1238 $LD $a0,$SIZE_T*1($tp)
1239 $LD $a1,$SIZE_T*2($tp)
1240 $LD $a2,$SIZE_T*3($tp)
1241 $LD $a3,$SIZE_T*4($tp)
1242 $LD $a4,$SIZE_T*5($tp)
1243 $LD $a5,$SIZE_T*6($tp)
1244 $LD $a6,$SIZE_T*7($tp)
1245 $LD $a7,$SIZE_T*8($tp)
1246 addc $acc0,$acc0,$a0
1247 $LD $a0,$SIZE_T*1($ap)
1248 adde $acc1,$acc1,$a1
1249 $LD $a1,$SIZE_T*2($ap)
1250 adde $acc2,$acc2,$a2
1251 $LD $a2,$SIZE_T*3($ap)
1252 adde $acc3,$acc3,$a3
1253 $LD $a3,$SIZE_T*4($ap)
1254 adde $acc4,$acc4,$a4
1255 $LD $a4,$SIZE_T*5($ap)
1256 adde $acc5,$acc5,$a5
1257 $LD $a5,$SIZE_T*6($ap)
1258 adde $acc6,$acc6,$a6
1259 $LD $a6,$SIZE_T*7($ap)
1260 subi $rp,$ap,$SIZE_T*7
1261 addze $acc7,$a7
1262 $LDU $a7,$SIZE_T*8($ap)
1263 #addze $carry,$zero # moved below
1264 li $cnt,0
1265 b .Lsqr8x_mul
1266
1267 # a[8]a[0]
1268 # a[9]a[0]
1269 # a[a]a[0]
1270 # a[b]a[0]
1271 # a[c]a[0]
1272 # a[d]a[0]
1273 # a[e]a[0]
1274 # a[f]a[0]
1275 # a[8]a[1]
1276 # a[f]a[1]........................
1277 # a[8]a[2]
1278 # a[f]a[2]........................
1279 # a[8]a[3]
1280 # a[f]a[3]........................
1281 # a[8]a[4]
1282 # a[f]a[4]........................
1283 # a[8]a[5]
1284 # a[f]a[5]........................
1285 # a[8]a[6]
1286 # a[f]a[6]........................
1287 # a[8]a[7]
1288 # a[f]a[7]........................
1289 .align 5
1290 .Lsqr8x_mul:
1291 $UMULL $t0,$a0,$n0
1292 addze $carry,$zero # carry bit, modulo-scheduled
1293 $UMULL $t1,$a1,$n0
1294 addi $cnt,$cnt,$SIZE_T
1295 $UMULL $t2,$a2,$n0
1296 andi. $cnt,$cnt,$SIZE_T*8-1
1297 $UMULL $t3,$a3,$n0
1298 addc $acc0,$acc0,$t0
1299 $UMULL $t0,$a4,$n0
1300 adde $acc1,$acc1,$t1
1301 $UMULL $t1,$a5,$n0
1302 adde $acc2,$acc2,$t2
1303 $UMULL $t2,$a6,$n0
1304 adde $acc3,$acc3,$t3
1305 $UMULL $t3,$a7,$n0
1306 adde $acc4,$acc4,$t0
1307 $UMULH $t0,$a0,$n0
1308 adde $acc5,$acc5,$t1
1309 $UMULH $t1,$a1,$n0
1310 adde $acc6,$acc6,$t2
1311 $UMULH $t2,$a2,$n0
1312 adde $acc7,$acc7,$t3
1313 $UMULH $t3,$a3,$n0
1314 addze $carry,$carry
1315 $STU $acc0,$SIZE_T($tp)
1316 addc $acc0,$acc1,$t0
1317 $UMULH $t0,$a4,$n0
1318 adde $acc1,$acc2,$t1
1319 $UMULH $t1,$a5,$n0
1320 adde $acc2,$acc3,$t2
1321 $UMULH $t2,$a6,$n0
1322 adde $acc3,$acc4,$t3
1323 $UMULH $t3,$a7,$n0
1324 $LDX $n0,$rp,$cnt
1325 adde $acc4,$acc5,$t0
1326 adde $acc5,$acc6,$t1
1327 adde $acc6,$acc7,$t2
1328 adde $acc7,$carry,$t3
1329 #addze $carry,$zero # moved above
1330 bne .Lsqr8x_mul
1331 # note that carry flag is guaranteed
1332 # to be zero at this point
1333 $UCMP $ap,$ap_end # done yet?
1334 beq .Lsqr8x_break
1335
1336 $LD $a0,$SIZE_T*1($tp)
1337 $LD $a1,$SIZE_T*2($tp)
1338 $LD $a2,$SIZE_T*3($tp)
1339 $LD $a3,$SIZE_T*4($tp)
1340 $LD $a4,$SIZE_T*5($tp)
1341 $LD $a5,$SIZE_T*6($tp)
1342 $LD $a6,$SIZE_T*7($tp)
1343 $LD $a7,$SIZE_T*8($tp)
1344 addc $acc0,$acc0,$a0
1345 $LD $a0,$SIZE_T*1($ap)
1346 adde $acc1,$acc1,$a1
1347 $LD $a1,$SIZE_T*2($ap)
1348 adde $acc2,$acc2,$a2
1349 $LD $a2,$SIZE_T*3($ap)
1350 adde $acc3,$acc3,$a3
1351 $LD $a3,$SIZE_T*4($ap)
1352 adde $acc4,$acc4,$a4
1353 $LD $a4,$SIZE_T*5($ap)
1354 adde $acc5,$acc5,$a5
1355 $LD $a5,$SIZE_T*6($ap)
1356 adde $acc6,$acc6,$a6
1357 $LD $a6,$SIZE_T*7($ap)
1358 adde $acc7,$acc7,$a7
1359 $LDU $a7,$SIZE_T*8($ap)
1360 #addze $carry,$zero # moved above
1361 b .Lsqr8x_mul
1362
1363 .align 5
1364 .Lsqr8x_break:
1365 $LD $a0,$SIZE_T*8($rp)
1366 addi $ap,$rp,$SIZE_T*15
1367 $LD $a1,$SIZE_T*9($rp)
1368 sub. $t0,$ap_end,$ap # is it last iteration?
1369 $LD $a2,$SIZE_T*10($rp)
1370 sub $t1,$tp,$t0
1371 $LD $a3,$SIZE_T*11($rp)
1372 $LD $a4,$SIZE_T*12($rp)
1373 $LD $a5,$SIZE_T*13($rp)
1374 $LD $a6,$SIZE_T*14($rp)
1375 $LD $a7,$SIZE_T*15($rp)
1376 beq .Lsqr8x_outer_loop
1377
1378 $ST $acc0,$SIZE_T*1($tp)
1379 $LD $acc0,$SIZE_T*1($t1)
1380 $ST $acc1,$SIZE_T*2($tp)
1381 $LD $acc1,$SIZE_T*2($t1)
1382 $ST $acc2,$SIZE_T*3($tp)
1383 $LD $acc2,$SIZE_T*3($t1)
1384 $ST $acc3,$SIZE_T*4($tp)
1385 $LD $acc3,$SIZE_T*4($t1)
1386 $ST $acc4,$SIZE_T*5($tp)
1387 $LD $acc4,$SIZE_T*5($t1)
1388 $ST $acc5,$SIZE_T*6($tp)
1389 $LD $acc5,$SIZE_T*6($t1)
1390 $ST $acc6,$SIZE_T*7($tp)
1391 $LD $acc6,$SIZE_T*7($t1)
1392 $ST $acc7,$SIZE_T*8($tp)
1393 $LD $acc7,$SIZE_T*8($t1)
1394 mr $tp,$t1
1395 b .Lsqr8x_outer_loop
1396
1397 .align 5
1398 .Lsqr8x_outer_break:
1399 ####################################################################
1400 # Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
1401 $LD $a1,$SIZE_T*1($t0) # recall that $t0 is &a[-1]
1402 $LD $a3,$SIZE_T*2($t0)
1403 $LD $a5,$SIZE_T*3($t0)
1404 $LD $a7,$SIZE_T*4($t0)
1405 addi $ap,$t0,$SIZE_T*4
1406 # "tp[x]" comments are for num==8 case
1407 $LD $t1,$SIZE_T*13($sp) # =tp[1], t[0] is not interesting
1408 $LD $t2,$SIZE_T*14($sp)
1409 $LD $t3,$SIZE_T*15($sp)
1410 $LD $t0,$SIZE_T*16($sp)
1411
1412 $ST $acc0,$SIZE_T*1($tp) # tp[8]=
1413 srwi $cnt,$num,`log($SIZE_T)/log(2)+2`
1414 $ST $acc1,$SIZE_T*2($tp)
1415 subi $cnt,$cnt,1
1416 $ST $acc2,$SIZE_T*3($tp)
1417 $ST $acc3,$SIZE_T*4($tp)
1418 $ST $acc4,$SIZE_T*5($tp)
1419 $ST $acc5,$SIZE_T*6($tp)
1420 $ST $acc6,$SIZE_T*7($tp)
1421 #$ST $acc7,$SIZE_T*8($tp) # tp[15] is not interesting
1422 addi $tp,$sp,$SIZE_T*11 # &tp[-1]
1423 $UMULL $acc0,$a1,$a1
1424 $UMULH $a1,$a1,$a1
1425 add $acc1,$t1,$t1 # <<1
1426 $SHRI $t1,$t1,$BITS-1
1427 $UMULL $a2,$a3,$a3
1428 $UMULH $a3,$a3,$a3
1429 addc $acc1,$acc1,$a1
1430 add $acc2,$t2,$t2
1431 $SHRI $t2,$t2,$BITS-1
1432 add $acc3,$t3,$t3
1433 $SHRI $t3,$t3,$BITS-1
1434 or $acc2,$acc2,$t1
1435
1436 mtctr $cnt
1437 .Lsqr4x_shift_n_add:
1438 $UMULL $a4,$a5,$a5
1439 $UMULH $a5,$a5,$a5
1440 $LD $t1,$SIZE_T*6($tp) # =tp[5]
1441 $LD $a1,$SIZE_T*1($ap)
1442 adde $acc2,$acc2,$a2
1443 add $acc4,$t0,$t0
1444 $SHRI $t0,$t0,$BITS-1
1445 or $acc3,$acc3,$t2
1446 $LD $t2,$SIZE_T*7($tp) # =tp[6]
1447 adde $acc3,$acc3,$a3
1448 $LD $a3,$SIZE_T*2($ap)
1449 add $acc5,$t1,$t1
1450 $SHRI $t1,$t1,$BITS-1
1451 or $acc4,$acc4,$t3
1452 $LD $t3,$SIZE_T*8($tp) # =tp[7]
1453 $UMULL $a6,$a7,$a7
1454 $UMULH $a7,$a7,$a7
1455 adde $acc4,$acc4,$a4
1456 add $acc6,$t2,$t2
1457 $SHRI $t2,$t2,$BITS-1
1458 or $acc5,$acc5,$t0
1459 $LD $t0,$SIZE_T*9($tp) # =tp[8]
1460 adde $acc5,$acc5,$a5
1461 $LD $a5,$SIZE_T*3($ap)
1462 add $acc7,$t3,$t3
1463 $SHRI $t3,$t3,$BITS-1
1464 or $acc6,$acc6,$t1
1465 $LD $t1,$SIZE_T*10($tp) # =tp[9]
1466 $UMULL $a0,$a1,$a1
1467 $UMULH $a1,$a1,$a1
1468 adde $acc6,$acc6,$a6
1469 $ST $acc0,$SIZE_T*1($tp) # tp[0]=
1470 add $acc0,$t0,$t0
1471 $SHRI $t0,$t0,$BITS-1
1472 or $acc7,$acc7,$t2
1473 $LD $t2,$SIZE_T*11($tp) # =tp[10]
1474 adde $acc7,$acc7,$a7
1475 $LDU $a7,$SIZE_T*4($ap)
1476 $ST $acc1,$SIZE_T*2($tp) # tp[1]=
1477 add $acc1,$t1,$t1
1478 $SHRI $t1,$t1,$BITS-1
1479 or $acc0,$acc0,$t3
1480 $LD $t3,$SIZE_T*12($tp) # =tp[11]
1481 $UMULL $a2,$a3,$a3
1482 $UMULH $a3,$a3,$a3
1483 adde $acc0,$acc0,$a0
1484 $ST $acc2,$SIZE_T*3($tp) # tp[2]=
1485 add $acc2,$t2,$t2
1486 $SHRI $t2,$t2,$BITS-1
1487 or $acc1,$acc1,$t0
1488 $LD $t0,$SIZE_T*13($tp) # =tp[12]
1489 adde $acc1,$acc1,$a1
1490 $ST $acc3,$SIZE_T*4($tp) # tp[3]=
1491 $ST $acc4,$SIZE_T*5($tp) # tp[4]=
1492 $ST $acc5,$SIZE_T*6($tp) # tp[5]=
1493 $ST $acc6,$SIZE_T*7($tp) # tp[6]=
1494 $STU $acc7,$SIZE_T*8($tp) # tp[7]=
1495 add $acc3,$t3,$t3
1496 $SHRI $t3,$t3,$BITS-1
1497 or $acc2,$acc2,$t1
1498 bdnz .Lsqr4x_shift_n_add
1499 ___
1500 my ($np,$np_end)=($ap,$ap_end);
1501 $code.=<<___;
1502 $POP $np,$SIZE_T*7($sp) # pull &np[-1] and n0
1503 $POP $n0,$SIZE_T*8($sp)
1504
1505 $UMULL $a4,$a5,$a5
1506 $UMULH $a5,$a5,$a5
1507 $ST $acc0,$SIZE_T*1($tp) # tp[8]=
1508 $LD $acc0,$SIZE_T*12($sp) # =tp[0]
1509 $LD $t1,$SIZE_T*6($tp) # =tp[13]
1510 adde $acc2,$acc2,$a2
1511 add $acc4,$t0,$t0
1512 $SHRI $t0,$t0,$BITS-1
1513 or $acc3,$acc3,$t2
1514 $LD $t2,$SIZE_T*7($tp) # =tp[14]
1515 adde $acc3,$acc3,$a3
1516 add $acc5,$t1,$t1
1517 $SHRI $t1,$t1,$BITS-1
1518 or $acc4,$acc4,$t3
1519 $UMULL $a6,$a7,$a7
1520 $UMULH $a7,$a7,$a7
1521 adde $acc4,$acc4,$a4
1522 add $acc6,$t2,$t2
1523 $SHRI $t2,$t2,$BITS-1
1524 or $acc5,$acc5,$t0
1525 $ST $acc1,$SIZE_T*2($tp) # tp[9]=
1526 $LD $acc1,$SIZE_T*13($sp) # =tp[1]
1527 adde $acc5,$acc5,$a5
1528 or $acc6,$acc6,$t1
1529 $LD $a0,$SIZE_T*1($np)
1530 $LD $a1,$SIZE_T*2($np)
1531 adde $acc6,$acc6,$a6
1532 $LD $a2,$SIZE_T*3($np)
1533 $LD $a3,$SIZE_T*4($np)
1534 adde $acc7,$a7,$t2
1535 $LD $a4,$SIZE_T*5($np)
1536 $LD $a5,$SIZE_T*6($np)
1537
1538 ################################################################
1539 # Reduce by 8 limbs per iteration
1540 $UMULL $na0,$n0,$acc0 # t[0]*n0
1541 li $cnt,8
1542 $LD $a6,$SIZE_T*7($np)
1543 add $np_end,$np,$num
1544 $LDU $a7,$SIZE_T*8($np)
1545 $ST $acc2,$SIZE_T*3($tp) # tp[10]=
1546 $LD $acc2,$SIZE_T*14($sp)
1547 $ST $acc3,$SIZE_T*4($tp) # tp[11]=
1548 $LD $acc3,$SIZE_T*15($sp)
1549 $ST $acc4,$SIZE_T*5($tp) # tp[12]=
1550 $LD $acc4,$SIZE_T*16($sp)
1551 $ST $acc5,$SIZE_T*6($tp) # tp[13]=
1552 $LD $acc5,$SIZE_T*17($sp)
1553 $ST $acc6,$SIZE_T*7($tp) # tp[14]=
1554 $LD $acc6,$SIZE_T*18($sp)
1555 $ST $acc7,$SIZE_T*8($tp) # tp[15]=
1556 $LD $acc7,$SIZE_T*19($sp)
1557 addi $tp,$sp,$SIZE_T*11 # &tp[-1]
1558 mtctr $cnt
1559 b .Lsqr8x_reduction
1560
1561 .align 5
1562 .Lsqr8x_reduction:
1563 # (*) $UMULL $t0,$a0,$na0 # lo(n[0-7])*lo(t[0]*n0)
1564 $UMULL $t1,$a1,$na0
1565 $UMULL $t2,$a2,$na0
1566 $STU $na0,$SIZE_T($tp) # put aside t[0]*n0 for tail processing
1567 $UMULL $t3,$a3,$na0
1568 # (*) addc $acc0,$acc0,$t0
1569 addic $acc0,$acc0,-1 # (*)
1570 $UMULL $t0,$a4,$na0
1571 adde $acc0,$acc1,$t1
1572 $UMULL $t1,$a5,$na0
1573 adde $acc1,$acc2,$t2
1574 $UMULL $t2,$a6,$na0
1575 adde $acc2,$acc3,$t3
1576 $UMULL $t3,$a7,$na0
1577 adde $acc3,$acc4,$t0
1578 $UMULH $t0,$a0,$na0 # hi(n[0-7])*lo(t[0]*n0)
1579 adde $acc4,$acc5,$t1
1580 $UMULH $t1,$a1,$na0
1581 adde $acc5,$acc6,$t2
1582 $UMULH $t2,$a2,$na0
1583 adde $acc6,$acc7,$t3
1584 $UMULH $t3,$a3,$na0
1585 addze $acc7,$zero
1586 addc $acc0,$acc0,$t0
1587 $UMULH $t0,$a4,$na0
1588 adde $acc1,$acc1,$t1
1589 $UMULH $t1,$a5,$na0
1590 adde $acc2,$acc2,$t2
1591 $UMULH $t2,$a6,$na0
1592 adde $acc3,$acc3,$t3
1593 $UMULH $t3,$a7,$na0
1594 $UMULL $na0,$n0,$acc0 # next t[0]*n0
1595 adde $acc4,$acc4,$t0
1596 adde $acc5,$acc5,$t1
1597 adde $acc6,$acc6,$t2
1598 adde $acc7,$acc7,$t3
1599 bdnz .Lsqr8x_reduction
1600
1601 $LD $t0,$SIZE_T*1($tp)
1602 $LD $t1,$SIZE_T*2($tp)
1603 $LD $t2,$SIZE_T*3($tp)
1604 $LD $t3,$SIZE_T*4($tp)
1605 subi $rp,$tp,$SIZE_T*7
1606 $UCMP $np_end,$np # done yet?
1607 addc $acc0,$acc0,$t0
1608 $LD $t0,$SIZE_T*5($tp)
1609 adde $acc1,$acc1,$t1
1610 $LD $t1,$SIZE_T*6($tp)
1611 adde $acc2,$acc2,$t2
1612 $LD $t2,$SIZE_T*7($tp)
1613 adde $acc3,$acc3,$t3
1614 $LD $t3,$SIZE_T*8($tp)
1615 adde $acc4,$acc4,$t0
1616 adde $acc5,$acc5,$t1
1617 adde $acc6,$acc6,$t2
1618 adde $acc7,$acc7,$t3
1619 #addze $carry,$zero # moved below
1620 beq .Lsqr8x8_post_condition
1621
1622 $LD $n0,$SIZE_T*0($rp)
1623 $LD $a0,$SIZE_T*1($np)
1624 $LD $a1,$SIZE_T*2($np)
1625 $LD $a2,$SIZE_T*3($np)
1626 $LD $a3,$SIZE_T*4($np)
1627 $LD $a4,$SIZE_T*5($np)
1628 $LD $a5,$SIZE_T*6($np)
1629 $LD $a6,$SIZE_T*7($np)
1630 $LDU $a7,$SIZE_T*8($np)
1631 li $cnt,0
1632
1633 .align 5
1634 .Lsqr8x_tail:
1635 $UMULL $t0,$a0,$n0
1636 addze $carry,$zero # carry bit, modulo-scheduled
1637 $UMULL $t1,$a1,$n0
1638 addi $cnt,$cnt,$SIZE_T
1639 $UMULL $t2,$a2,$n0
1640 andi. $cnt,$cnt,$SIZE_T*8-1
1641 $UMULL $t3,$a3,$n0
1642 addc $acc0,$acc0,$t0
1643 $UMULL $t0,$a4,$n0
1644 adde $acc1,$acc1,$t1
1645 $UMULL $t1,$a5,$n0
1646 adde $acc2,$acc2,$t2
1647 $UMULL $t2,$a6,$n0
1648 adde $acc3,$acc3,$t3
1649 $UMULL $t3,$a7,$n0
1650 adde $acc4,$acc4,$t0
1651 $UMULH $t0,$a0,$n0
1652 adde $acc5,$acc5,$t1
1653 $UMULH $t1,$a1,$n0
1654 adde $acc6,$acc6,$t2
1655 $UMULH $t2,$a2,$n0
1656 adde $acc7,$acc7,$t3
1657 $UMULH $t3,$a3,$n0
1658 addze $carry,$carry
1659 $STU $acc0,$SIZE_T($tp)
1660 addc $acc0,$acc1,$t0
1661 $UMULH $t0,$a4,$n0
1662 adde $acc1,$acc2,$t1
1663 $UMULH $t1,$a5,$n0
1664 adde $acc2,$acc3,$t2
1665 $UMULH $t2,$a6,$n0
1666 adde $acc3,$acc4,$t3
1667 $UMULH $t3,$a7,$n0
1668 $LDX $n0,$rp,$cnt
1669 adde $acc4,$acc5,$t0
1670 adde $acc5,$acc6,$t1
1671 adde $acc6,$acc7,$t2
1672 adde $acc7,$carry,$t3
1673 #addze $carry,$zero # moved above
1674 bne .Lsqr8x_tail
1675 # note that carry flag is guaranteed
1676 # to be zero at this point
1677 $LD $a0,$SIZE_T*1($tp)
1678 $POP $carry,$SIZE_T*10($sp) # pull top-most carry in case we break
1679 $UCMP $np_end,$np # done yet?
1680 $LD $a1,$SIZE_T*2($tp)
1681 sub $t2,$np_end,$num # rewound np
1682 $LD $a2,$SIZE_T*3($tp)
1683 $LD $a3,$SIZE_T*4($tp)
1684 $LD $a4,$SIZE_T*5($tp)
1685 $LD $a5,$SIZE_T*6($tp)
1686 $LD $a6,$SIZE_T*7($tp)
1687 $LD $a7,$SIZE_T*8($tp)
1688 beq .Lsqr8x_tail_break
1689
1690 addc $acc0,$acc0,$a0
1691 $LD $a0,$SIZE_T*1($np)
1692 adde $acc1,$acc1,$a1
1693 $LD $a1,$SIZE_T*2($np)
1694 adde $acc2,$acc2,$a2
1695 $LD $a2,$SIZE_T*3($np)
1696 adde $acc3,$acc3,$a3
1697 $LD $a3,$SIZE_T*4($np)
1698 adde $acc4,$acc4,$a4
1699 $LD $a4,$SIZE_T*5($np)
1700 adde $acc5,$acc5,$a5
1701 $LD $a5,$SIZE_T*6($np)
1702 adde $acc6,$acc6,$a6
1703 $LD $a6,$SIZE_T*7($np)
1704 adde $acc7,$acc7,$a7
1705 $LDU $a7,$SIZE_T*8($np)
1706 #addze $carry,$zero # moved above
1707 b .Lsqr8x_tail
1708
1709 .align 5
1710 .Lsqr8x_tail_break:
1711 $POP $n0,$SIZE_T*8($sp) # pull n0
1712 $POP $t3,$SIZE_T*9($sp) # &tp[2*num-1]
1713 addi $cnt,$tp,$SIZE_T*8 # end of current t[num] window
1714
1715 addic $carry,$carry,-1 # "move" top-most carry to carry bit
1716 adde $t0,$acc0,$a0
1717 $LD $acc0,$SIZE_T*8($rp)
1718 $LD $a0,$SIZE_T*1($t2) # recall that $t2 is &n[-1]
1719 adde $t1,$acc1,$a1
1720 $LD $acc1,$SIZE_T*9($rp)
1721 $LD $a1,$SIZE_T*2($t2)
1722 adde $acc2,$acc2,$a2
1723 $LD $a2,$SIZE_T*3($t2)
1724 adde $acc3,$acc3,$a3
1725 $LD $a3,$SIZE_T*4($t2)
1726 adde $acc4,$acc4,$a4
1727 $LD $a4,$SIZE_T*5($t2)
1728 adde $acc5,$acc5,$a5
1729 $LD $a5,$SIZE_T*6($t2)
1730 adde $acc6,$acc6,$a6
1731 $LD $a6,$SIZE_T*7($t2)
1732 adde $acc7,$acc7,$a7
1733 $LD $a7,$SIZE_T*8($t2)
1734 addi $np,$t2,$SIZE_T*8
1735 addze $t2,$zero # top-most carry
1736 $UMULL $na0,$n0,$acc0
1737 $ST $t0,$SIZE_T*1($tp)
1738 $UCMP $cnt,$t3 # did we hit the bottom?
1739 $ST $t1,$SIZE_T*2($tp)
1740 li $cnt,8
1741 $ST $acc2,$SIZE_T*3($tp)
1742 $LD $acc2,$SIZE_T*10($rp)
1743 $ST $acc3,$SIZE_T*4($tp)
1744 $LD $acc3,$SIZE_T*11($rp)
1745 $ST $acc4,$SIZE_T*5($tp)
1746 $LD $acc4,$SIZE_T*12($rp)
1747 $ST $acc5,$SIZE_T*6($tp)
1748 $LD $acc5,$SIZE_T*13($rp)
1749 $ST $acc6,$SIZE_T*7($tp)
1750 $LD $acc6,$SIZE_T*14($rp)
1751 $ST $acc7,$SIZE_T*8($tp)
1752 $LD $acc7,$SIZE_T*15($rp)
1753 $PUSH $t2,$SIZE_T*10($sp) # off-load top-most carry
1754 addi $tp,$rp,$SIZE_T*7 # slide the window
1755 mtctr $cnt
1756 bne .Lsqr8x_reduction
1757
1758 ################################################################
1759 # Final step. We see if the result is larger than the modulus, and
1760 # if it is, subtract the modulus. But a comparison implies a
1761 # subtraction, so we subtract the modulus, see if it borrowed,
1762 # and conditionally copy the original value.
1763 $POP $rp,$SIZE_T*6($sp) # pull &rp[-1]
1764 srwi $cnt,$num,`log($SIZE_T)/log(2)+3`
1765 mr $n0,$tp # put tp aside
1766 addi $tp,$tp,$SIZE_T*8
1767 subi $cnt,$cnt,1
1768 subfc $t0,$a0,$acc0
1769 subfe $t1,$a1,$acc1
1770 mr $carry,$t2
1771 mr $ap_end,$rp # $rp copy
1772
1773 mtctr $cnt
1774 b .Lsqr8x_sub
1775
1776 .align 5
1777 .Lsqr8x_sub:
1778 $LD $a0,$SIZE_T*1($np)
1779 $LD $acc0,$SIZE_T*1($tp)
1780 $LD $a1,$SIZE_T*2($np)
1781 $LD $acc1,$SIZE_T*2($tp)
1782 subfe $t2,$a2,$acc2
1783 $LD $a2,$SIZE_T*3($np)
1784 $LD $acc2,$SIZE_T*3($tp)
1785 subfe $t3,$a3,$acc3
1786 $LD $a3,$SIZE_T*4($np)
1787 $LD $acc3,$SIZE_T*4($tp)
1788 $ST $t0,$SIZE_T*1($rp)
1789 subfe $t0,$a4,$acc4
1790 $LD $a4,$SIZE_T*5($np)
1791 $LD $acc4,$SIZE_T*5($tp)
1792 $ST $t1,$SIZE_T*2($rp)
1793 subfe $t1,$a5,$acc5
1794 $LD $a5,$SIZE_T*6($np)
1795 $LD $acc5,$SIZE_T*6($tp)
1796 $ST $t2,$SIZE_T*3($rp)
1797 subfe $t2,$a6,$acc6
1798 $LD $a6,$SIZE_T*7($np)
1799 $LD $acc6,$SIZE_T*7($tp)
1800 $ST $t3,$SIZE_T*4($rp)
1801 subfe $t3,$a7,$acc7
1802 $LDU $a7,$SIZE_T*8($np)
1803 $LDU $acc7,$SIZE_T*8($tp)
1804 $ST $t0,$SIZE_T*5($rp)
1805 subfe $t0,$a0,$acc0
1806 $ST $t1,$SIZE_T*6($rp)
1807 subfe $t1,$a1,$acc1
1808 $ST $t2,$SIZE_T*7($rp)
1809 $STU $t3,$SIZE_T*8($rp)
1810 bdnz .Lsqr8x_sub
1811
1812 srwi $cnt,$num,`log($SIZE_T)/log(2)+2`
1813 $LD $a0,$SIZE_T*1($ap_end) # original $rp
1814 $LD $acc0,$SIZE_T*1($n0) # original $tp
1815 subi $cnt,$cnt,1
1816 $LD $a1,$SIZE_T*2($ap_end)
1817 $LD $acc1,$SIZE_T*2($n0)
1818 subfe $t2,$a2,$acc2
1819 $LD $a2,$SIZE_T*3($ap_end)
1820 $LD $acc2,$SIZE_T*3($n0)
1821 subfe $t3,$a3,$acc3
1822 $LD $a3,$SIZE_T*4($ap_end)
1823 $LDU $acc3,$SIZE_T*4($n0)
1824 $ST $t0,$SIZE_T*1($rp)
1825 subfe $t0,$a4,$acc4
1826 $ST $t1,$SIZE_T*2($rp)
1827 subfe $t1,$a5,$acc5
1828 $ST $t2,$SIZE_T*3($rp)
1829 subfe $t2,$a6,$acc6
1830 $ST $t3,$SIZE_T*4($rp)
1831 subfe $t3,$a7,$acc7
1832 $ST $t0,$SIZE_T*5($rp)
1833 subfe $carry,$zero,$carry # did it borrow?
1834 $ST $t1,$SIZE_T*6($rp)
1835 $ST $t2,$SIZE_T*7($rp)
1836 $ST $t3,$SIZE_T*8($rp)
1837
1838 addi $tp,$sp,$SIZE_T*11
1839 mtctr $cnt
1840
1841 .Lsqr4x_cond_copy:
1842 andc $a0,$a0,$carry
1843 $ST $zero,-$SIZE_T*3($n0) # wipe stack clean
1844 and $acc0,$acc0,$carry
1845 $ST $zero,-$SIZE_T*2($n0)
1846 andc $a1,$a1,$carry
1847 $ST $zero,-$SIZE_T*1($n0)
1848 and $acc1,$acc1,$carry
1849 $ST $zero,-$SIZE_T*0($n0)
1850 andc $a2,$a2,$carry
1851 $ST $zero,$SIZE_T*1($tp)
1852 and $acc2,$acc2,$carry
1853 $ST $zero,$SIZE_T*2($tp)
1854 andc $a3,$a3,$carry
1855 $ST $zero,$SIZE_T*3($tp)
1856 and $acc3,$acc3,$carry
1857 $STU $zero,$SIZE_T*4($tp)
1858 or $t0,$a0,$acc0
1859 $LD $a0,$SIZE_T*5($ap_end)
1860 $LD $acc0,$SIZE_T*1($n0)
1861 or $t1,$a1,$acc1
1862 $LD $a1,$SIZE_T*6($ap_end)
1863 $LD $acc1,$SIZE_T*2($n0)
1864 or $t2,$a2,$acc2
1865 $LD $a2,$SIZE_T*7($ap_end)
1866 $LD $acc2,$SIZE_T*3($n0)
1867 or $t3,$a3,$acc3
1868 $LD $a3,$SIZE_T*8($ap_end)
1869 $LDU $acc3,$SIZE_T*4($n0)
1870 $ST $t0,$SIZE_T*1($ap_end)
1871 $ST $t1,$SIZE_T*2($ap_end)
1872 $ST $t2,$SIZE_T*3($ap_end)
1873 $STU $t3,$SIZE_T*4($ap_end)
1874 bdnz .Lsqr4x_cond_copy
1875
1876 $POP $ap,0($sp) # pull saved sp
1877 andc $a0,$a0,$carry
1878 and $acc0,$acc0,$carry
1879 andc $a1,$a1,$carry
1880 and $acc1,$acc1,$carry
1881 andc $a2,$a2,$carry
1882 and $acc2,$acc2,$carry
1883 andc $a3,$a3,$carry
1884 and $acc3,$acc3,$carry
1885 or $t0,$a0,$acc0
1886 or $t1,$a1,$acc1
1887 or $t2,$a2,$acc2
1888 or $t3,$a3,$acc3
1889 $ST $t0,$SIZE_T*1($ap_end)
1890 $ST $t1,$SIZE_T*2($ap_end)
1891 $ST $t2,$SIZE_T*3($ap_end)
1892 $ST $t3,$SIZE_T*4($ap_end)
1893
1894 b .Lsqr8x_done
1895
1896 .align 5
1897 .Lsqr8x8_post_condition:
1898 $POP $rp,$SIZE_T*6($sp) # pull rp
1899 $POP $ap,0($sp) # pull saved sp
1900 addze $carry,$zero
1901
1902 # $acc0-7,$carry hold result, $a0-7 hold modulus
1903 subfc $acc0,$a0,$acc0
1904 subfe $acc1,$a1,$acc1
1905 $ST $zero,$SIZE_T*12($sp) # wipe stack clean
1906 $ST $zero,$SIZE_T*13($sp)
1907 subfe $acc2,$a2,$acc2
1908 $ST $zero,$SIZE_T*14($sp)
1909 $ST $zero,$SIZE_T*15($sp)
1910 subfe $acc3,$a3,$acc3
1911 $ST $zero,$SIZE_T*16($sp)
1912 $ST $zero,$SIZE_T*17($sp)
1913 subfe $acc4,$a4,$acc4
1914 $ST $zero,$SIZE_T*18($sp)
1915 $ST $zero,$SIZE_T*19($sp)
1916 subfe $acc5,$a5,$acc5
1917 $ST $zero,$SIZE_T*20($sp)
1918 $ST $zero,$SIZE_T*21($sp)
1919 subfe $acc6,$a6,$acc6
1920 $ST $zero,$SIZE_T*22($sp)
1921 $ST $zero,$SIZE_T*23($sp)
1922 subfe $acc7,$a7,$acc7
1923 $ST $zero,$SIZE_T*24($sp)
1924 $ST $zero,$SIZE_T*25($sp)
1925 subfe $carry,$zero,$carry # did it borrow?
1926 $ST $zero,$SIZE_T*26($sp)
1927 $ST $zero,$SIZE_T*27($sp)
1928
1929 and $a0,$a0,$carry
1930 and $a1,$a1,$carry
1931 addc $acc0,$acc0,$a0 # add modulus back if borrowed
1932 and $a2,$a2,$carry
1933 adde $acc1,$acc1,$a1
1934 and $a3,$a3,$carry
1935 adde $acc2,$acc2,$a2
1936 and $a4,$a4,$carry
1937 adde $acc3,$acc3,$a3
1938 and $a5,$a5,$carry
1939 adde $acc4,$acc4,$a4
1940 and $a6,$a6,$carry
1941 adde $acc5,$acc5,$a5
1942 and $a7,$a7,$carry
1943 adde $acc6,$acc6,$a6
1944 adde $acc7,$acc7,$a7
1945 $ST $acc0,$SIZE_T*1($rp)
1946 $ST $acc1,$SIZE_T*2($rp)
1947 $ST $acc2,$SIZE_T*3($rp)
1948 $ST $acc3,$SIZE_T*4($rp)
1949 $ST $acc4,$SIZE_T*5($rp)
1950 $ST $acc5,$SIZE_T*6($rp)
1951 $ST $acc6,$SIZE_T*7($rp)
1952 $ST $acc7,$SIZE_T*8($rp)
1953
1954 .Lsqr8x_done:
1955 $PUSH $zero,$SIZE_T*8($sp)
1956 $PUSH $zero,$SIZE_T*10($sp)
1957
1958 $POP r14,-$SIZE_T*18($ap)
1959 li r3,1 # signal "done"
1960 $POP r15,-$SIZE_T*17($ap)
1961 $POP r16,-$SIZE_T*16($ap)
1962 $POP r17,-$SIZE_T*15($ap)
1963 $POP r18,-$SIZE_T*14($ap)
1964 $POP r19,-$SIZE_T*13($ap)
1965 $POP r20,-$SIZE_T*12($ap)
1966 $POP r21,-$SIZE_T*11($ap)
1967 $POP r22,-$SIZE_T*10($ap)
1968 $POP r23,-$SIZE_T*9($ap)
1969 $POP r24,-$SIZE_T*8($ap)
1970 $POP r25,-$SIZE_T*7($ap)
1971 $POP r26,-$SIZE_T*6($ap)
1972 $POP r27,-$SIZE_T*5($ap)
1973 $POP r28,-$SIZE_T*4($ap)
1974 $POP r29,-$SIZE_T*3($ap)
1975 $POP r30,-$SIZE_T*2($ap)
1976 $POP r31,-$SIZE_T*1($ap)
1977 mr $sp,$ap
1978 blr
1979 .long 0
1980 .byte 0,12,4,0x20,0x80,18,6,0
1981 .long 0
1982 .size __bn_sqr8x_mont,.-__bn_sqr8x_mont
1983 ___
1984 }
1985 $code.=<<___;
1986 .asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
1987 ___
1988
1989 $code =~ s/\`([^\`]*)\`/eval $1/gem;
1990 print $code;
1991 close STDOUT;