#! /usr/bin/env perl
# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# April 2006

# "Teaser" Montgomery multiplication module for PowerPC. It's possible
# to gain a bit more by modulo-scheduling the outer loop; a dedicated
# squaring procedure should then give a further 20%, and the code can
# be adapted for a 32-bit application running on a 64-bit CPU. As for
# the latter, it won't be able to achieve "native" 64-bit performance,
# because in a 32-bit application context every addc instruction has
# to be expanded into addc, two right shifts by 32 and finally adde,
# etc. So far the RSA *sign* performance improvement over the
# pre-bn_mul_mont assembly for a 64-bit application running on a
# PPC970/G5 is:
#
#	512-bit		+65%
#	1024-bit	+35%
#	2048-bit	+18%
#	4096-bit	+4%

# September 2016
#
# Add a multiplication procedure operating on lengths divisible by 4
# and a squaring procedure operating on lengths divisible by 8. Length
# is expressed in number of limbs. RSA private key operations are
# ~35-50% faster (more so for longer keys) on contemporary high-end
# POWER processors in 64-bit builds, and [mysteriously enough] even
# more so in 32-bit builds. On low-end 32-bit processors the
# improvement turned out to be marginal...
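
# For orientation, the routines below implement textbook word-serial
# Montgomery multiplication. As an illustrative sketch only (this is
# not part of the generated code), each outer-loop iteration over the
# limbs of b computes, with B = 2^$BITS:
#
#	m1 = ((tp[0] + ap[0]*bp[i]) * n0) mod B
#	tp = (tp + ap*bp[i] + np*m1) / B
#
# so the bottom limb cancels and the running result tp stays num limbs
# wide, with at most one extra overflow bit carried on the side.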

$flavour = shift;

if ($flavour =~ /32/) {
	$BITS=	32;
	$BNSZ=	$BITS/8;
	$SIZE_T=4;
	$RZONE=	224;

	$LD=	"lwz";		# load
	$LDU=	"lwzu";		# load and update
	$LDX=	"lwzx";		# load indexed
	$ST=	"stw";		# store
	$STU=	"stwu";		# store and update
	$STX=	"stwx";		# store indexed
	$STUX=	"stwux";	# store indexed and update
	$UMULL=	"mullw";	# unsigned multiply low
	$UMULH=	"mulhwu";	# unsigned multiply high
	$UCMP=	"cmplw";	# unsigned compare
	$SHRI=	"srwi";		# unsigned shift right by immediate
	$SHLI=	"slwi";		# unsigned shift left by immediate
	$PUSH=	$ST;
	$POP=	$LD;
} elsif ($flavour =~ /64/) {
	$BITS=	64;
	$BNSZ=	$BITS/8;
	$SIZE_T=8;
	$RZONE=	288;

	# same as above, but 64-bit mnemonics...
	$LD=	"ld";		# load
	$LDU=	"ldu";		# load and update
	$LDX=	"ldx";		# load indexed
	$ST=	"std";		# store
	$STU=	"stdu";		# store and update
	$STX=	"stdx";		# store indexed
	$STUX=	"stdux";	# store indexed and update
	$UMULL=	"mulld";	# unsigned multiply low
	$UMULH=	"mulhdu";	# unsigned multiply high
	$UCMP=	"cmpld";	# unsigned compare
	$SHRI=	"srdi";		# unsigned shift right by immediate
	$SHLI=	"sldi";		# unsigned shift left by immediate
	$PUSH=	$ST;
	$POP=	$LD;
} else { die "nonsense $flavour"; }

$FRAME=8*$SIZE_T+$RZONE;
$LOCALS=8*$SIZE_T;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";

$sp="r1";
$toc="r2";
$rp="r3";
$ap="r4";
$bp="r5";
$np="r6";
$n0="r7";
$num="r8";

{
my $ovf=$rp;
my $rp="r9";	# $rp is reassigned
my $aj="r10";
my $nj="r11";
my $tj="r12";
# non-volatile registers
my $i="r20";
my $j="r21";
my $tp="r22";
my $m0="r23";
my $m1="r24";
my $lo0="r25";
my $hi0="r26";
my $lo1="r27";
my $hi1="r28";
my $alo="r29";
my $ahi="r30";
my $nlo="r31";
#
my $nhi="r0";

$code=<<___;
.machine "any"
.text

.globl	.bn_mul_mont_int
.align	5
.bn_mul_mont_int:
	mr	$rp,r3		; $rp is reassigned
	li	r3,0
___
$code.=<<___ if ($BNSZ==4);
	cmpwi	$num,32		; performance is not better for longer keys
	bgelr
___
$code.=<<___;
	slwi	$num,$num,`log($BNSZ)/log(2)`
	li	$tj,-4096
	addi	$ovf,$num,$FRAME
	subf	$ovf,$ovf,$sp	; $sp-$ovf
	and	$ovf,$ovf,$tj	; minimize TLB usage
	subf	$ovf,$sp,$ovf	; $ovf-$sp
	mr	$tj,$sp
	srwi	$num,$num,`log($BNSZ)/log(2)`
	$STUX	$sp,$sp,$ovf

	$PUSH	r20,`-12*$SIZE_T`($tj)
	$PUSH	r21,`-11*$SIZE_T`($tj)
	$PUSH	r22,`-10*$SIZE_T`($tj)
	$PUSH	r23,`-9*$SIZE_T`($tj)
	$PUSH	r24,`-8*$SIZE_T`($tj)
	$PUSH	r25,`-7*$SIZE_T`($tj)
	$PUSH	r26,`-6*$SIZE_T`($tj)
	$PUSH	r27,`-5*$SIZE_T`($tj)
	$PUSH	r28,`-4*$SIZE_T`($tj)
	$PUSH	r29,`-3*$SIZE_T`($tj)
	$PUSH	r30,`-2*$SIZE_T`($tj)
	$PUSH	r31,`-1*$SIZE_T`($tj)

	$LD	$n0,0($n0)	; pull n0[0] value
	addi	$num,$num,-2	; adjust $num for counter register
\f
	$LD	$m0,0($bp)	; m0=bp[0]
	$LD	$aj,0($ap)	; ap[0]
	addi	$tp,$sp,$LOCALS
	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[0]
	$UMULH	$hi0,$aj,$m0

	$LD	$aj,$BNSZ($ap)	; ap[1]
	$LD	$nj,0($np)	; np[0]

	$UMULL	$m1,$lo0,$n0	; "tp[0]"*n0

	$UMULL	$alo,$aj,$m0	; ap[1]*bp[0]
	$UMULH	$ahi,$aj,$m0

	$UMULL	$lo1,$nj,$m1	; np[0]*m1
	$UMULH	$hi1,$nj,$m1
	$LD	$nj,$BNSZ($np)	; np[1]
	addc	$lo1,$lo1,$lo0
	addze	$hi1,$hi1

	$UMULL	$nlo,$nj,$m1	; np[1]*m1
	$UMULH	$nhi,$nj,$m1

	mtctr	$num
	li	$j,`2*$BNSZ`
.align	4
L1st:
	$LDX	$aj,$ap,$j	; ap[j]
	addc	$lo0,$alo,$hi0
	$LDX	$nj,$np,$j	; np[j]
	addze	$hi0,$ahi
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[0]
	addc	$lo1,$nlo,$hi1
	$UMULH	$ahi,$aj,$m0
	addze	$hi1,$nhi
	$UMULL	$nlo,$nj,$m1	; np[j]*m1
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
	$UMULH	$nhi,$nj,$m1
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addi	$j,$j,$BNSZ	; j++
	addi	$tp,$tp,$BNSZ	; tp++
	bdnz	L1st
;L1st
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	li	$ovf,0
	addc	$hi1,$hi1,$hi0
	addze	$ovf,$ovf	; upmost overflow bit
	$ST	$hi1,$BNSZ($tp)
\f
	li	$i,$BNSZ
.align	4
Louter:
	$LDX	$m0,$bp,$i	; m0=bp[i]
	$LD	$aj,0($ap)	; ap[0]
	addi	$tp,$sp,$LOCALS
	$LD	$tj,$LOCALS($sp); tp[0]
	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[i]
	$UMULH	$hi0,$aj,$m0
	$LD	$aj,$BNSZ($ap)	; ap[1]
	$LD	$nj,0($np)	; np[0]
	addc	$lo0,$lo0,$tj	; ap[0]*bp[i]+tp[0]
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
	addze	$hi0,$hi0
	$UMULL	$m1,$lo0,$n0	; tp[0]*n0
	$UMULH	$ahi,$aj,$m0
	$UMULL	$lo1,$nj,$m1	; np[0]*m1
	$UMULH	$hi1,$nj,$m1
	$LD	$nj,$BNSZ($np)	; np[1]
	addc	$lo1,$lo1,$lo0
	$UMULL	$nlo,$nj,$m1	; np[1]*m1
	addze	$hi1,$hi1
	$UMULH	$nhi,$nj,$m1
\f
	mtctr	$num
	li	$j,`2*$BNSZ`
.align	4
Linner:
	$LDX	$aj,$ap,$j	; ap[j]
	addc	$lo0,$alo,$hi0
	$LD	$tj,$BNSZ($tp)	; tp[j]
	addze	$hi0,$ahi
	$LDX	$nj,$np,$j	; np[j]
	addc	$lo1,$nlo,$hi1
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
	addze	$hi1,$nhi
	$UMULH	$ahi,$aj,$m0
	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
	$UMULL	$nlo,$nj,$m1	; np[j]*m1
	addze	$hi0,$hi0
	$UMULH	$nhi,$nj,$m1
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
	addi	$j,$j,$BNSZ	; j++
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]
	addi	$tp,$tp,$BNSZ	; tp++
	bdnz	Linner
;Linner
	$LD	$tj,$BNSZ($tp)	; tp[j]
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi
	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
	addze	$hi0,$hi0

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addic	$ovf,$ovf,-1	; move upmost overflow to XER[CA]
	li	$ovf,0
	adde	$hi1,$hi1,$hi0
	addze	$ovf,$ovf
	$ST	$hi1,$BNSZ($tp)
;
	slwi	$tj,$num,`log($BNSZ)/log(2)`
	$UCMP	$i,$tj
	addi	$i,$i,$BNSZ
	ble	Louter
\f
	addi	$num,$num,2	; restore $num
	subfc	$j,$j,$j	; j=0 and "clear" XER[CA]
	addi	$tp,$sp,$LOCALS
	mtctr	$num

.align	4
Lsub:	$LDX	$tj,$tp,$j
	$LDX	$nj,$np,$j
	subfe	$aj,$nj,$tj	; tp[j]-np[j]
	$STX	$aj,$rp,$j
	addi	$j,$j,$BNSZ
	bdnz	Lsub

	li	$j,0
	mtctr	$num
	subfe	$ovf,$j,$ovf	; handle upmost overflow bit
	and	$ap,$tp,$ovf
	andc	$np,$rp,$ovf
	or	$ap,$ap,$np	; ap=borrow?tp:rp

.align	4
Lcopy:				; copy or in-place refresh
	$LDX	$tj,$ap,$j
	$STX	$tj,$rp,$j
	$STX	$j,$tp,$j	; zap at once
	addi	$j,$j,$BNSZ
	bdnz	Lcopy

	$POP	$tj,0($sp)
	li	r3,1
	$POP	r20,`-12*$SIZE_T`($tj)
	$POP	r21,`-11*$SIZE_T`($tj)
	$POP	r22,`-10*$SIZE_T`($tj)
	$POP	r23,`-9*$SIZE_T`($tj)
	$POP	r24,`-8*$SIZE_T`($tj)
	$POP	r25,`-7*$SIZE_T`($tj)
	$POP	r26,`-6*$SIZE_T`($tj)
	$POP	r27,`-5*$SIZE_T`($tj)
	$POP	r28,`-4*$SIZE_T`($tj)
	$POP	r29,`-3*$SIZE_T`($tj)
	$POP	r30,`-2*$SIZE_T`($tj)
	$POP	r31,`-1*$SIZE_T`($tj)
	mr	$sp,$tj
	blr
	.long	0
	.byte	0,12,4,0,0x80,12,6,0
	.long	0
.size	.bn_mul_mont_int,.-.bn_mul_mont_int
___
}
if (1) {
my ($a0,$a1,$a2,$a3,
    $t0,$t1,$t2,$t3,
    $m0,$m1,$m2,$m3,
    $acc0,$acc1,$acc2,$acc3,$acc4,
    $bi,$mi,$tp,$ap_end,$cnt) = map("r$_",(9..12,14..31));
my ($carry,$zero) = ($rp,"r0");

# sp----------->+-------------------------------+
#		| saved sp			|
#		+-------------------------------+
#		.				.
# +8*size_t	+-------------------------------+
#		| 4 "n0*t0"			|
#		.				.
#		.				.
# +12*size_t	+-------------------------------+
#		| size_t tmp[num]		|
#		.				.
#		.				.
#		.				.
#		+-------------------------------+
#		| topmost carry			|
#		.				.
# -18*size_t	+-------------------------------+
#		| 18 saved gpr, r14-r31		|
#		.				.
#		.				.
#		+-------------------------------+
$code.=<<___;
.globl	.bn_mul4x_mont_int
.align	5
.bn_mul4x_mont_int:
	andi.	r0,$num,7
	bne	.Lmul4x_do
	$UCMP	$ap,$bp
	bne	.Lmul4x_do
	b	.Lsqr8x_do
.Lmul4x_do:
	slwi	$num,$num,`log($SIZE_T)/log(2)`
	mr	$a0,$sp
	li	$a1,-32*$SIZE_T
	sub	$a1,$a1,$num
	$STUX	$sp,$sp,$a1	# alloca

	$PUSH	r14,-$SIZE_T*18($a0)
	$PUSH	r15,-$SIZE_T*17($a0)
	$PUSH	r16,-$SIZE_T*16($a0)
	$PUSH	r17,-$SIZE_T*15($a0)
	$PUSH	r18,-$SIZE_T*14($a0)
	$PUSH	r19,-$SIZE_T*13($a0)
	$PUSH	r20,-$SIZE_T*12($a0)
	$PUSH	r21,-$SIZE_T*11($a0)
	$PUSH	r22,-$SIZE_T*10($a0)
	$PUSH	r23,-$SIZE_T*9($a0)
	$PUSH	r24,-$SIZE_T*8($a0)
	$PUSH	r25,-$SIZE_T*7($a0)
	$PUSH	r26,-$SIZE_T*6($a0)
	$PUSH	r27,-$SIZE_T*5($a0)
	$PUSH	r28,-$SIZE_T*4($a0)
	$PUSH	r29,-$SIZE_T*3($a0)
	$PUSH	r30,-$SIZE_T*2($a0)
	$PUSH	r31,-$SIZE_T*1($a0)

	subi	$ap,$ap,$SIZE_T		# bias by -1
	subi	$np,$np,$SIZE_T		# bias by -1
	subi	$rp,$rp,$SIZE_T		# bias by -1
	$LD	$n0,0($n0)		# *n0

	add	$t0,$bp,$num
	add	$ap_end,$ap,$num
	subi	$t0,$t0,$SIZE_T*4	# &b[num-4]

	$LD	$bi,$SIZE_T*0($bp)	# b[0]
	li	$acc0,0
	$LD	$a0,$SIZE_T*1($ap)	# a[0..3]
	li	$acc1,0
	$LD	$a1,$SIZE_T*2($ap)
	li	$acc2,0
	$LD	$a2,$SIZE_T*3($ap)
	li	$acc3,0
	$LDU	$a3,$SIZE_T*4($ap)
	$LD	$m0,$SIZE_T*1($np)	# n[0..3]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)

	$PUSH	$rp,$SIZE_T*6($sp)	# offload rp and &b[num-4]
	$PUSH	$t0,$SIZE_T*7($sp)
	li	$carry,0
	addic	$tp,$sp,$SIZE_T*7	# &t[-1], clear carry bit
	li	$cnt,0
	li	$zero,0
	b	.Loop_mul4x_1st_reduction

.align	5
.Loop_mul4x_1st_reduction:
	$UMULL	$t0,$a0,$bi		# lo(a[0..3]*b[0])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[0..3]*b[0])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULL	$mi,$acc0,$n0		# t[0]*n0
	adde	$acc3,$acc3,$t3
	$UMULH	$t2,$a2,$bi
	addze	$acc4,$zero
	$UMULH	$t3,$a3,$bi
	$LDX	$bi,$bp,$cnt		# next b[i] (or b[0])
	addc	$acc1,$acc1,$t0
	# (*)	mul	$t0,$m0,$mi	# lo(n[0..3]*t[0]*n0)
	$STU	$mi,$SIZE_T($tp)	# put aside t[0]*n0 for tail processing
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	# (*)	addc	$acc0,$acc0,$t0
	# (*)	On the removal of the first multiplication and addition
	#	instructions: the outcome of the first addition is
	#	guaranteed to be zero, which leaves only two
	#	computationally significant outcomes, namely whether it
	#	carries or not. So when does it carry? Is there an
	#	alternative way to deduce it? If you follow the
	#	operations, you can observe that the condition for carry
	#	is quite simple: $acc0 being non-zero. The carry can
	#	therefore be calculated by adding -1 to $acc0, which is
	#	what the next instruction does.
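	#	A concrete illustration (assuming $BITS-bit wraparound
	#	arithmetic): -1 sign-extends to 2^$BITS-1, so for
	#	$acc0==0 the sum 0+(2^$BITS-1) doesn't wrap and XER[CA]
	#	stays clear, while for any non-zero $acc0 the sum wraps
	#	past 2^$BITS and sets XER[CA]. That is, CA becomes
	#	($acc0 != 0), exactly the carry the discarded addc would
	#	have produced.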
	addic	$acc0,$acc0,-1		# (*), discarded
	$UMULH	$t0,$m0,$mi		# hi(n[0..3]*t[0]*n0)
	adde	$acc0,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc1,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc2,$acc3,$t3
	$UMULH	$t3,$m3,$mi
	adde	$acc3,$acc4,$carry
	addze	$carry,$zero
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_1st_reduction

	$UCMP	$ap_end,$ap
	beq	.Lmul4x4_post_condition

	$LD	$a0,$SIZE_T*1($ap)	# a[4..7]
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	$LD	$mi,$SIZE_T*8($sp)	# a[0]*n0
	$LD	$m0,$SIZE_T*1($np)	# n[4..7]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_1st_tail

.align	5
.Loop_mul4x_1st_tail:
	$UMULL	$t0,$a0,$bi		# lo(a[4..7]*b[i])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[4..7]*b[i])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$a2,$bi
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$a3,$bi
	addze	$acc4,$zero
	$LDX	$bi,$bp,$cnt		# next b[i] (or b[0])
	addc	$acc1,$acc1,$t0
	$UMULL	$t0,$m0,$mi		# lo(n[4..7]*a[0]*n0)
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$m0,$mi		# hi(n[4..7]*a[0]*n0)
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc3,$acc3,$t3
	adde	$acc4,$acc4,$carry
	$UMULH	$t3,$m3,$mi
	addze	$carry,$zero
	addi	$mi,$sp,$SIZE_T*8
	$LDX	$mi,$mi,$cnt		# next t[0]*n0
	$STU	$acc0,$SIZE_T($tp)	# word of result
	addc	$acc0,$acc1,$t0
	adde	$acc1,$acc2,$t1
	adde	$acc2,$acc3,$t2
	adde	$acc3,$acc4,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_1st_tail

	sub	$t1,$ap_end,$num	# rewound $ap
	$UCMP	$ap_end,$ap		# done yet?
	beq	.Lmul4x_proceed

	$LD	$a0,$SIZE_T*1($ap)
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	$LD	$m0,$SIZE_T*1($np)
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_1st_tail

.align	5
.Lmul4x_proceed:
	$LDU	$bi,$SIZE_T*4($bp)	# *++b
	addze	$carry,$carry		# topmost carry
	$LD	$a0,$SIZE_T*1($t1)
	$LD	$a1,$SIZE_T*2($t1)
	$LD	$a2,$SIZE_T*3($t1)
	$LD	$a3,$SIZE_T*4($t1)
	addi	$ap,$t1,$SIZE_T*4
	sub	$np,$np,$num		# rewind np

	$ST	$acc0,$SIZE_T*1($tp)	# result
	$ST	$acc1,$SIZE_T*2($tp)
	$ST	$acc2,$SIZE_T*3($tp)
	$ST	$acc3,$SIZE_T*4($tp)
	$ST	$carry,$SIZE_T*5($tp)	# save topmost carry
	$LD	$acc0,$SIZE_T*12($sp)	# t[0..3]
	$LD	$acc1,$SIZE_T*13($sp)
	$LD	$acc2,$SIZE_T*14($sp)
	$LD	$acc3,$SIZE_T*15($sp)

	$LD	$m0,$SIZE_T*1($np)	# n[0..3]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	addic	$tp,$sp,$SIZE_T*7	# &t[-1], clear carry bit
	li	$carry,0
	b	.Loop_mul4x_reduction

.align	5
.Loop_mul4x_reduction:
	$UMULL	$t0,$a0,$bi		# lo(a[0..3]*b[4])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[0..3]*b[4])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULL	$mi,$acc0,$n0		# t[0]*n0
	adde	$acc3,$acc3,$t3
	$UMULH	$t2,$a2,$bi
	addze	$acc4,$zero
	$UMULH	$t3,$a3,$bi
	$LDX	$bi,$bp,$cnt		# next b[i]
	addc	$acc1,$acc1,$t0
	# (*)	mul	$t0,$m0,$mi
	$STU	$mi,$SIZE_T($tp)	# put aside t[0]*n0 for tail processing
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi		# lo(n[0..3]*t[0]*n0)
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	# (*)	addc	$acc0,$acc0,$t0
	addic	$acc0,$acc0,-1		# (*), discarded
	$UMULH	$t0,$m0,$mi		# hi(n[0..3]*t[0]*n0)
	adde	$acc0,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc1,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc2,$acc3,$t3
	$UMULH	$t3,$m3,$mi
	adde	$acc3,$acc4,$carry
	addze	$carry,$zero
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_reduction

	$LD	$t0,$SIZE_T*5($tp)	# t[4..7]
	addze	$carry,$carry
	$LD	$t1,$SIZE_T*6($tp)
	$LD	$t2,$SIZE_T*7($tp)
	$LD	$t3,$SIZE_T*8($tp)
	$LD	$a0,$SIZE_T*1($ap)	# a[4..7]
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry

	$LD	$mi,$SIZE_T*8($sp)	# t[0]*n0
	$LD	$m0,$SIZE_T*1($np)	# n[4..7]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_tail

.align	5
.Loop_mul4x_tail:
	$UMULL	$t0,$a0,$bi		# lo(a[4..7]*b[4])
	addze	$carry,$carry		# modulo-scheduled
	$UMULL	$t1,$a1,$bi
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$bi
	andi.	$cnt,$cnt,$SIZE_T*4-1
	$UMULL	$t3,$a3,$bi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a0,$bi		# hi(a[4..7]*b[4])
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$a2,$bi
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$a3,$bi
	addze	$acc4,$zero
	$LDX	$bi,$bp,$cnt		# next b[i]
	addc	$acc1,$acc1,$t0
	$UMULL	$t0,$m0,$mi		# lo(n[4..7]*t[0]*n0)
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$m1,$mi
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$m2,$mi
	adde	$acc4,$acc4,$t3		# can't overflow
	$UMULL	$t3,$m3,$mi
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$m0,$mi		# hi(n[4..7]*t[0]*n0)
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$m1,$mi
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$m2,$mi
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$m3,$mi
	adde	$acc4,$acc4,$carry
	addi	$mi,$sp,$SIZE_T*8
	$LDX	$mi,$mi,$cnt		# next a[0]*n0
	addze	$carry,$zero
	$STU	$acc0,$SIZE_T($tp)	# word of result
	addc	$acc0,$acc1,$t0
	adde	$acc1,$acc2,$t1
	adde	$acc2,$acc3,$t2
	adde	$acc3,$acc4,$t3
	#addze	$carry,$carry
	bne	.Loop_mul4x_tail

	$LD	$t0,$SIZE_T*5($tp)	# next t[i] or topmost carry
	sub	$t1,$np,$num		# rewound np?
	addze	$carry,$carry
	$UCMP	$ap_end,$ap		# done yet?
	beq	.Loop_mul4x_break

	$LD	$t1,$SIZE_T*6($tp)
	$LD	$t2,$SIZE_T*7($tp)
	$LD	$t3,$SIZE_T*8($tp)
	$LD	$a0,$SIZE_T*1($ap)
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	addc	$acc0,$acc0,$t0
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$t2
	adde	$acc3,$acc3,$t3
	#addze	$carry,$carry

	$LD	$m0,$SIZE_T*1($np)	# n[4..7]
	$LD	$m1,$SIZE_T*2($np)
	$LD	$m2,$SIZE_T*3($np)
	$LDU	$m3,$SIZE_T*4($np)
	b	.Loop_mul4x_tail

.align	5
.Loop_mul4x_break:
	$POP	$t2,$SIZE_T*6($sp)	# pull rp and &b[num-4]
	$POP	$t3,$SIZE_T*7($sp)
	addc	$a0,$acc0,$t0		# accumulate topmost carry
	$LD	$acc0,$SIZE_T*12($sp)	# t[0..3]
	addze	$a1,$acc1
	$LD	$acc1,$SIZE_T*13($sp)
	addze	$a2,$acc2
	$LD	$acc2,$SIZE_T*14($sp)
	addze	$a3,$acc3
	$LD	$acc3,$SIZE_T*15($sp)
	addze	$carry,$carry		# topmost carry
	$ST	$a0,$SIZE_T*1($tp)	# result
	sub	$ap,$ap_end,$num	# rewind ap
	$ST	$a1,$SIZE_T*2($tp)
	$ST	$a2,$SIZE_T*3($tp)
	$ST	$a3,$SIZE_T*4($tp)
	$ST	$carry,$SIZE_T*5($tp)	# store topmost carry

	$LD	$m0,$SIZE_T*1($t1)	# n[0..3]
	$LD	$m1,$SIZE_T*2($t1)
	$LD	$m2,$SIZE_T*3($t1)
	$LD	$m3,$SIZE_T*4($t1)
	addi	$np,$t1,$SIZE_T*4
	$UCMP	$bp,$t3			# done yet?
	beq	.Lmul4x_post

	$LDU	$bi,$SIZE_T*4($bp)
	$LD	$a0,$SIZE_T*1($ap)	# a[0..3]
	$LD	$a1,$SIZE_T*2($ap)
	$LD	$a2,$SIZE_T*3($ap)
	$LDU	$a3,$SIZE_T*4($ap)
	li	$carry,0
	addic	$tp,$sp,$SIZE_T*7	# &t[-1], clear carry bit
	b	.Loop_mul4x_reduction

.align	5
.Lmul4x_post:
	# Final step. We check whether the result is larger than the
	# modulus and, if it is, subtract the modulus. But comparison
	# itself implies subtraction, so we simply subtract the modulus,
	# see whether it borrowed, and conditionally copy the original
	# value back.
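	# In C-like pseudocode the conditional copy below amounts to
	# (illustrative only):
	#
	#	mask  = 0 - borrow;	# all ones if t < n, else 0
	#	rp[j] = (diff[j] & ~mask) | (t[j] & mask);
	#
	# i.e. a branch-free select between the difference and the
	# original value.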
	srwi	$cnt,$num,`log($SIZE_T)/log(2)+2`
	mr	$bp,$t2			# &rp[-1]
	subi	$cnt,$cnt,1
	mr	$ap_end,$t2		# &rp[-1] copy
	subfc	$t0,$m0,$acc0
	addi	$tp,$sp,$SIZE_T*15
	subfe	$t1,$m1,$acc1

	mtctr	$cnt
.Lmul4x_sub:
	$LD	$m0,$SIZE_T*1($np)
	$LD	$acc0,$SIZE_T*1($tp)
	subfe	$t2,$m2,$acc2
	$LD	$m1,$SIZE_T*2($np)
	$LD	$acc1,$SIZE_T*2($tp)
	subfe	$t3,$m3,$acc3
	$LD	$m2,$SIZE_T*3($np)
	$LD	$acc2,$SIZE_T*3($tp)
	$LDU	$m3,$SIZE_T*4($np)
	$LDU	$acc3,$SIZE_T*4($tp)
	$ST	$t0,$SIZE_T*1($bp)
	$ST	$t1,$SIZE_T*2($bp)
	subfe	$t0,$m0,$acc0
	$ST	$t2,$SIZE_T*3($bp)
	$STU	$t3,$SIZE_T*4($bp)
	subfe	$t1,$m1,$acc1
	bdnz	.Lmul4x_sub

	$LD	$a0,$SIZE_T*1($ap_end)
	$ST	$t0,$SIZE_T*1($bp)
	$LD	$t0,$SIZE_T*12($sp)
	subfe	$t2,$m2,$acc2
	$LD	$a1,$SIZE_T*2($ap_end)
	$ST	$t1,$SIZE_T*2($bp)
	$LD	$t1,$SIZE_T*13($sp)
	subfe	$t3,$m3,$acc3
	subfe	$carry,$zero,$carry	# did it borrow?
	addi	$tp,$sp,$SIZE_T*12
	$LD	$a2,$SIZE_T*3($ap_end)
	$ST	$t2,$SIZE_T*3($bp)
	$LD	$t2,$SIZE_T*14($sp)
	$LD	$a3,$SIZE_T*4($ap_end)
	$ST	$t3,$SIZE_T*4($bp)
	$LD	$t3,$SIZE_T*15($sp)

	mtctr	$cnt
.Lmul4x_cond_copy:
	and	$t0,$t0,$carry
	andc	$a0,$a0,$carry
	$ST	$zero,$SIZE_T*0($tp)	# wipe stack clean
	and	$t1,$t1,$carry
	andc	$a1,$a1,$carry
	$ST	$zero,$SIZE_T*1($tp)
	and	$t2,$t2,$carry
	andc	$a2,$a2,$carry
	$ST	$zero,$SIZE_T*2($tp)
	and	$t3,$t3,$carry
	andc	$a3,$a3,$carry
	$ST	$zero,$SIZE_T*3($tp)
	or	$acc0,$t0,$a0
	$LD	$a0,$SIZE_T*5($ap_end)
	$LD	$t0,$SIZE_T*4($tp)
	or	$acc1,$t1,$a1
	$LD	$a1,$SIZE_T*6($ap_end)
	$LD	$t1,$SIZE_T*5($tp)
	or	$acc2,$t2,$a2
	$LD	$a2,$SIZE_T*7($ap_end)
	$LD	$t2,$SIZE_T*6($tp)
	or	$acc3,$t3,$a3
	$LD	$a3,$SIZE_T*8($ap_end)
	$LD	$t3,$SIZE_T*7($tp)
	addi	$tp,$tp,$SIZE_T*4
	$ST	$acc0,$SIZE_T*1($ap_end)
	$ST	$acc1,$SIZE_T*2($ap_end)
	$ST	$acc2,$SIZE_T*3($ap_end)
	$STU	$acc3,$SIZE_T*4($ap_end)
	bdnz	.Lmul4x_cond_copy

	$POP	$bp,0($sp)		# pull saved sp
	and	$t0,$t0,$carry
	andc	$a0,$a0,$carry
	$ST	$zero,$SIZE_T*0($tp)
	and	$t1,$t1,$carry
	andc	$a1,$a1,$carry
	$ST	$zero,$SIZE_T*1($tp)
	and	$t2,$t2,$carry
	andc	$a2,$a2,$carry
	$ST	$zero,$SIZE_T*2($tp)
	and	$t3,$t3,$carry
	andc	$a3,$a3,$carry
	$ST	$zero,$SIZE_T*3($tp)
	or	$acc0,$t0,$a0
	or	$acc1,$t1,$a1
	$ST	$zero,$SIZE_T*4($tp)
	or	$acc2,$t2,$a2
	or	$acc3,$t3,$a3
	$ST	$acc0,$SIZE_T*1($ap_end)
	$ST	$acc1,$SIZE_T*2($ap_end)
	$ST	$acc2,$SIZE_T*3($ap_end)
	$ST	$acc3,$SIZE_T*4($ap_end)

	b	.Lmul4x_done

.align	4
.Lmul4x4_post_condition:
	$POP	$ap,$SIZE_T*6($sp)	# pull &rp[-1]
	$POP	$bp,0($sp)		# pull saved sp
	addze	$carry,$carry		# modulo-scheduled
	# $acc0-3,$carry hold result, $m0-3 hold modulus
	subfc	$a0,$m0,$acc0
	subfe	$a1,$m1,$acc1
	subfe	$a2,$m2,$acc2
	subfe	$a3,$m3,$acc3
	subfe	$carry,$zero,$carry	# did it borrow?

	and	$m0,$m0,$carry
	and	$m1,$m1,$carry
	addc	$a0,$a0,$m0
	and	$m2,$m2,$carry
	adde	$a1,$a1,$m1
	and	$m3,$m3,$carry
	adde	$a2,$a2,$m2
	adde	$a3,$a3,$m3

	$ST	$a0,$SIZE_T*1($ap)	# write result
	$ST	$a1,$SIZE_T*2($ap)
	$ST	$a2,$SIZE_T*3($ap)
	$ST	$a3,$SIZE_T*4($ap)

.Lmul4x_done:
	$ST	$zero,$SIZE_T*8($sp)	# wipe stack clean
	$ST	$zero,$SIZE_T*9($sp)
	$ST	$zero,$SIZE_T*10($sp)
	$ST	$zero,$SIZE_T*11($sp)
	li	r3,1			# signal "done"
	$POP	r14,-$SIZE_T*18($bp)
	$POP	r15,-$SIZE_T*17($bp)
	$POP	r16,-$SIZE_T*16($bp)
	$POP	r17,-$SIZE_T*15($bp)
	$POP	r18,-$SIZE_T*14($bp)
	$POP	r19,-$SIZE_T*13($bp)
	$POP	r20,-$SIZE_T*12($bp)
	$POP	r21,-$SIZE_T*11($bp)
	$POP	r22,-$SIZE_T*10($bp)
	$POP	r23,-$SIZE_T*9($bp)
	$POP	r24,-$SIZE_T*8($bp)
	$POP	r25,-$SIZE_T*7($bp)
	$POP	r26,-$SIZE_T*6($bp)
	$POP	r27,-$SIZE_T*5($bp)
	$POP	r28,-$SIZE_T*4($bp)
	$POP	r29,-$SIZE_T*3($bp)
	$POP	r30,-$SIZE_T*2($bp)
	$POP	r31,-$SIZE_T*1($bp)
	mr	$sp,$bp
	blr
	.long	0
	.byte	0,12,4,0x20,0x80,18,6,0
	.long	0
.size	.bn_mul4x_mont_int,.-.bn_mul4x_mont_int
___
}

if (1) {
########################################################################
# The following is a PPC adaptation of sqrx8x_mont from the
# x86_64-mont5 module.

my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("r$_",(9..12,14..17));
my ($t0,$t1,$t2,$t3)=map("r$_",(18..21));
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("r$_",(22..29));
my ($cnt,$carry,$zero)=("r30","r31","r0");
my ($tp,$ap_end,$na0)=($bp,$np,$carry);

# sp----------->+-------------------------------+
#		| saved sp			|
#		+-------------------------------+
#		.				.
# +12*size_t	+-------------------------------+
#		| size_t tmp[2*num]		|
#		.				.
#		.				.
#		.				.
#		+-------------------------------+
#		.				.
# -18*size_t	+-------------------------------+
#		| 18 saved gpr, r14-r31		|
#		.				.
#		.				.
#		+-------------------------------+
$code.=<<___;
.align	5
__bn_sqr8x_mont:
.Lsqr8x_do:
	mr	$a0,$sp
	slwi	$a1,$num,`log($SIZE_T)/log(2)+1`
	li	$a2,-32*$SIZE_T
	sub	$a1,$a2,$a1
	slwi	$num,$num,`log($SIZE_T)/log(2)`
	$STUX	$sp,$sp,$a1		# alloca

	$PUSH	r14,-$SIZE_T*18($a0)
	$PUSH	r15,-$SIZE_T*17($a0)
	$PUSH	r16,-$SIZE_T*16($a0)
	$PUSH	r17,-$SIZE_T*15($a0)
	$PUSH	r18,-$SIZE_T*14($a0)
	$PUSH	r19,-$SIZE_T*13($a0)
	$PUSH	r20,-$SIZE_T*12($a0)
	$PUSH	r21,-$SIZE_T*11($a0)
	$PUSH	r22,-$SIZE_T*10($a0)
	$PUSH	r23,-$SIZE_T*9($a0)
	$PUSH	r24,-$SIZE_T*8($a0)
	$PUSH	r25,-$SIZE_T*7($a0)
	$PUSH	r26,-$SIZE_T*6($a0)
	$PUSH	r27,-$SIZE_T*5($a0)
	$PUSH	r28,-$SIZE_T*4($a0)
	$PUSH	r29,-$SIZE_T*3($a0)
	$PUSH	r30,-$SIZE_T*2($a0)
	$PUSH	r31,-$SIZE_T*1($a0)

	subi	$ap,$ap,$SIZE_T		# bias by -1
	subi	$t0,$np,$SIZE_T		# bias by -1
	subi	$rp,$rp,$SIZE_T		# bias by -1
	$LD	$n0,0($n0)		# *n0
	li	$zero,0

	add	$ap_end,$ap,$num
	$LD	$a0,$SIZE_T*1($ap)
	#li	$acc0,0
	$LD	$a1,$SIZE_T*2($ap)
	li	$acc1,0
	$LD	$a2,$SIZE_T*3($ap)
	li	$acc2,0
	$LD	$a3,$SIZE_T*4($ap)
	li	$acc3,0
	$LD	$a4,$SIZE_T*5($ap)
	li	$acc4,0
	$LD	$a5,$SIZE_T*6($ap)
	li	$acc5,0
	$LD	$a6,$SIZE_T*7($ap)
	li	$acc6,0
	$LDU	$a7,$SIZE_T*8($ap)
	li	$acc7,0

	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]
	subic.	$cnt,$num,$SIZE_T*8
	b	.Lsqr8x_zero_start

.align	5
.Lsqr8x_zero:
	subic.	$cnt,$cnt,$SIZE_T*8
	$ST	$zero,$SIZE_T*1($tp)
	$ST	$zero,$SIZE_T*2($tp)
	$ST	$zero,$SIZE_T*3($tp)
	$ST	$zero,$SIZE_T*4($tp)
	$ST	$zero,$SIZE_T*5($tp)
	$ST	$zero,$SIZE_T*6($tp)
	$ST	$zero,$SIZE_T*7($tp)
	$ST	$zero,$SIZE_T*8($tp)
.Lsqr8x_zero_start:
	$ST	$zero,$SIZE_T*9($tp)
	$ST	$zero,$SIZE_T*10($tp)
	$ST	$zero,$SIZE_T*11($tp)
	$ST	$zero,$SIZE_T*12($tp)
	$ST	$zero,$SIZE_T*13($tp)
	$ST	$zero,$SIZE_T*14($tp)
	$ST	$zero,$SIZE_T*15($tp)
	$STU	$zero,$SIZE_T*16($tp)
	bne	.Lsqr8x_zero

	$PUSH	$rp,$SIZE_T*6($sp)	# offload &rp[-1]
	$PUSH	$t0,$SIZE_T*7($sp)	# offload &np[-1]
	$PUSH	$n0,$SIZE_T*8($sp)	# offload n0
	$PUSH	$tp,$SIZE_T*9($sp)	# &tp[2*num-1]
	$PUSH	$zero,$SIZE_T*10($sp)	# initial top-most carry
	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]

# Multiply everything but a[i]*a[i]
.align	5
.Lsqr8x_outer_loop:
	#	a[1]a[0]	(i)
	#	a[2]a[0]
	#	a[3]a[0]
	#	a[4]a[0]
	#	a[5]a[0]
	#	a[6]a[0]
	#	a[7]a[0]
	#	a[2]a[1]	(ii)
	#	a[3]a[1]
	#	a[4]a[1]
	#	a[5]a[1]
	#	a[6]a[1]
	#	a[7]a[1]
	#	a[3]a[2]	(iii)
	#	a[4]a[2]
	#	a[5]a[2]
	#	a[6]a[2]
	#	a[7]a[2]
	#	a[4]a[3]	(iv)
	#	a[5]a[3]
	#	a[6]a[3]
	#	a[7]a[3]
	#	a[5]a[4]	(v)
	#	a[6]a[4]
	#	a[7]a[4]
	#	a[6]a[5]	(vi)
	#	a[7]a[5]
	#	a[7]a[6]	(vii)

	$UMULL	$t0,$a1,$a0		# lo(a[1..7]*a[0])	(i)
	$UMULL	$t1,$a2,$a0
	$UMULL	$t2,$a3,$a0
	$UMULL	$t3,$a4,$a0
	addc	$acc1,$acc1,$t0		# t[1]+lo(a[1]*a[0])
	$UMULL	$t0,$a5,$a0
	adde	$acc2,$acc2,$t1
	$UMULL	$t1,$a6,$a0
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$a7,$a0
	adde	$acc4,$acc4,$t3
	$UMULH	$t3,$a1,$a0		# hi(a[1..7]*a[0])
	adde	$acc5,$acc5,$t0
	$UMULH	$t0,$a2,$a0
	adde	$acc6,$acc6,$t1
	$UMULH	$t1,$a3,$a0
	adde	$acc7,$acc7,$t2
	$UMULH	$t2,$a4,$a0
	$ST	$acc0,$SIZE_T*1($tp)	# t[0]
	addze	$acc0,$zero		# t[8]
	$ST	$acc1,$SIZE_T*2($tp)	# t[1]
	addc	$acc2,$acc2,$t3		# t[2]+hi(a[1]*a[0])
	$UMULH	$t3,$a5,$a0
	adde	$acc3,$acc3,$t0
	$UMULH	$t0,$a6,$a0
	adde	$acc4,$acc4,$t1
	$UMULH	$t1,$a7,$a0
	adde	$acc5,$acc5,$t2
	$UMULL	$t2,$a2,$a1		# lo(a[2..7]*a[1])	(ii)
	adde	$acc6,$acc6,$t3
	$UMULL	$t3,$a3,$a1
	adde	$acc7,$acc7,$t0
	$UMULL	$t0,$a4,$a1
	adde	$acc0,$acc0,$t1

	$UMULL	$t1,$a5,$a1
	addc	$acc3,$acc3,$t2
	$UMULL	$t2,$a6,$a1
	adde	$acc4,$acc4,$t3
	$UMULL	$t3,$a7,$a1
	adde	$acc5,$acc5,$t0
	$UMULH	$t0,$a2,$a1		# hi(a[2..7]*a[1])
	adde	$acc6,$acc6,$t1
	$UMULH	$t1,$a3,$a1
	adde	$acc7,$acc7,$t2
	$UMULH	$t2,$a4,$a1
	adde	$acc0,$acc0,$t3
	$UMULH	$t3,$a5,$a1
	$ST	$acc2,$SIZE_T*3($tp)	# t[2]
	addze	$acc1,$zero		# t[9]
	$ST	$acc3,$SIZE_T*4($tp)	# t[3]
	addc	$acc4,$acc4,$t0
	$UMULH	$t0,$a6,$a1
	adde	$acc5,$acc5,$t1
	$UMULH	$t1,$a7,$a1
	adde	$acc6,$acc6,$t2
	$UMULL	$t2,$a3,$a2		# lo(a[3..7]*a[2])	(iii)
	adde	$acc7,$acc7,$t3
	$UMULL	$t3,$a4,$a2
	adde	$acc0,$acc0,$t0
	$UMULL	$t0,$a5,$a2
	adde	$acc1,$acc1,$t1

	$UMULL	$t1,$a6,$a2
	addc	$acc5,$acc5,$t2
	$UMULL	$t2,$a7,$a2
	adde	$acc6,$acc6,$t3
	$UMULH	$t3,$a3,$a2		# hi(a[3..7]*a[2])
	adde	$acc7,$acc7,$t0
	$UMULH	$t0,$a4,$a2
	adde	$acc0,$acc0,$t1
	$UMULH	$t1,$a5,$a2
	adde	$acc1,$acc1,$t2
	$UMULH	$t2,$a6,$a2
	$ST	$acc4,$SIZE_T*5($tp)	# t[4]
	addze	$acc2,$zero		# t[10]
	$ST	$acc5,$SIZE_T*6($tp)	# t[5]
	addc	$acc6,$acc6,$t3
	$UMULH	$t3,$a7,$a2
	adde	$acc7,$acc7,$t0
	$UMULL	$t0,$a4,$a3		# lo(a[4..7]*a[3])	(iv)
	adde	$acc0,$acc0,$t1
	$UMULL	$t1,$a5,$a3
	adde	$acc1,$acc1,$t2
	$UMULL	$t2,$a6,$a3
	adde	$acc2,$acc2,$t3

	$UMULL	$t3,$a7,$a3
	addc	$acc7,$acc7,$t0
	$UMULH	$t0,$a4,$a3		# hi(a[4..7]*a[3])
	adde	$acc0,$acc0,$t1
	$UMULH	$t1,$a5,$a3
	adde	$acc1,$acc1,$t2
	$UMULH	$t2,$a6,$a3
	adde	$acc2,$acc2,$t3
	$UMULH	$t3,$a7,$a3
	$ST	$acc6,$SIZE_T*7($tp)	# t[6]
	addze	$acc3,$zero		# t[11]
	$STU	$acc7,$SIZE_T*8($tp)	# t[7]
	addc	$acc0,$acc0,$t0
	$UMULL	$t0,$a5,$a4		# lo(a[5..7]*a[4])	(v)
	adde	$acc1,$acc1,$t1
	$UMULL	$t1,$a6,$a4
	adde	$acc2,$acc2,$t2
	$UMULL	$t2,$a7,$a4
	adde	$acc3,$acc3,$t3

	$UMULH	$t3,$a5,$a4		# hi(a[5..7]*a[4])
	addc	$acc1,$acc1,$t0
	$UMULH	$t0,$a6,$a4
	adde	$acc2,$acc2,$t1
	$UMULH	$t1,$a7,$a4
	adde	$acc3,$acc3,$t2
	$UMULL	$t2,$a6,$a5		# lo(a[6..7]*a[5])	(vi)
	addze	$acc4,$zero		# t[12]
	addc	$acc2,$acc2,$t3
	$UMULL	$t3,$a7,$a5
	adde	$acc3,$acc3,$t0
	$UMULH	$t0,$a6,$a5		# hi(a[6..7]*a[5])
	adde	$acc4,$acc4,$t1

	$UMULH	$t1,$a7,$a5
	addc	$acc3,$acc3,$t2
	$UMULL	$t2,$a7,$a6		# lo(a[7]*a[6])	(vii)
	adde	$acc4,$acc4,$t3
	$UMULH	$t3,$a7,$a6		# hi(a[7]*a[6])
	addze	$acc5,$zero		# t[13]
	addc	$acc4,$acc4,$t0
	$UCMP	$ap_end,$ap		# done yet?
	adde	$acc5,$acc5,$t1

	addc	$acc5,$acc5,$t2
	sub	$t0,$ap_end,$num	# rewound ap
	addze	$acc6,$zero		# t[14]
	add	$acc6,$acc6,$t3

	beq	.Lsqr8x_outer_break

	mr	$n0,$a0
	$LD	$a0,$SIZE_T*1($tp)
	$LD	$a1,$SIZE_T*2($tp)
	$LD	$a2,$SIZE_T*3($tp)
	$LD	$a3,$SIZE_T*4($tp)
	$LD	$a4,$SIZE_T*5($tp)
	$LD	$a5,$SIZE_T*6($tp)
	$LD	$a6,$SIZE_T*7($tp)
	$LD	$a7,$SIZE_T*8($tp)
	addc	$acc0,$acc0,$a0
	$LD	$a0,$SIZE_T*1($ap)
	adde	$acc1,$acc1,$a1
	$LD	$a1,$SIZE_T*2($ap)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($ap)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($ap)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($ap)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($ap)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($ap)
	subi	$rp,$ap,$SIZE_T*7
	addze	$acc7,$a7
	$LDU	$a7,$SIZE_T*8($ap)
	#addze	$carry,$zero		# moved below
	li	$cnt,0
	b	.Lsqr8x_mul

	#	a[8]a[0]
	#	a[9]a[0]
	#	a[a]a[0]
	#	a[b]a[0]
	#	a[c]a[0]
	#	a[d]a[0]
	#	a[e]a[0]
	#	a[f]a[0]
	#	a[8]a[1]
	#	a[f]a[1]........................
	#	a[8]a[2]
	#	a[f]a[2]........................
	#	a[8]a[3]
	#	a[f]a[3]........................
	#	a[8]a[4]
	#	a[f]a[4]........................
	#	a[8]a[5]
	#	a[f]a[5]........................
	#	a[8]a[6]
	#	a[f]a[6]........................
	#	a[8]a[7]
	#	a[f]a[7]........................
.align	5
.Lsqr8x_mul:
	$UMULL	$t0,$a0,$n0
	addze	$carry,$zero		# carry bit, modulo-scheduled
	$UMULL	$t1,$a1,$n0
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$n0
	andi.	$cnt,$cnt,$SIZE_T*8-1
	$UMULL	$t3,$a3,$n0
	addc	$acc0,$acc0,$t0
	$UMULL	$t0,$a4,$n0
	adde	$acc1,$acc1,$t1
	$UMULL	$t1,$a5,$n0
	adde	$acc2,$acc2,$t2
	$UMULL	$t2,$a6,$n0
	adde	$acc3,$acc3,$t3
	$UMULL	$t3,$a7,$n0
	adde	$acc4,$acc4,$t0
	$UMULH	$t0,$a0,$n0
	adde	$acc5,$acc5,$t1
	$UMULH	$t1,$a1,$n0
	adde	$acc6,$acc6,$t2
	$UMULH	$t2,$a2,$n0
	adde	$acc7,$acc7,$t3
	$UMULH	$t3,$a3,$n0
	addze	$carry,$carry
	$STU	$acc0,$SIZE_T($tp)
	addc	$acc0,$acc1,$t0
	$UMULH	$t0,$a4,$n0
	adde	$acc1,$acc2,$t1
	$UMULH	$t1,$a5,$n0
	adde	$acc2,$acc3,$t2
	$UMULH	$t2,$a6,$n0
	adde	$acc3,$acc4,$t3
	$UMULH	$t3,$a7,$n0
	$LDX	$n0,$rp,$cnt
	adde	$acc4,$acc5,$t0
	adde	$acc5,$acc6,$t1
	adde	$acc6,$acc7,$t2
	adde	$acc7,$carry,$t3
	#addze	$carry,$zero		# moved above
	bne	.Lsqr8x_mul
					# note that carry flag is guaranteed
					# to be zero at this point
	$UCMP	$ap,$ap_end		# done yet?
	beq	.Lsqr8x_break

	$LD	$a0,$SIZE_T*1($tp)
	$LD	$a1,$SIZE_T*2($tp)
	$LD	$a2,$SIZE_T*3($tp)
	$LD	$a3,$SIZE_T*4($tp)
	$LD	$a4,$SIZE_T*5($tp)
	$LD	$a5,$SIZE_T*6($tp)
	$LD	$a6,$SIZE_T*7($tp)
	$LD	$a7,$SIZE_T*8($tp)
	addc	$acc0,$acc0,$a0
	$LD	$a0,$SIZE_T*1($ap)
	adde	$acc1,$acc1,$a1
	$LD	$a1,$SIZE_T*2($ap)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($ap)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($ap)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($ap)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($ap)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($ap)
	adde	$acc7,$acc7,$a7
	$LDU	$a7,$SIZE_T*8($ap)
	#addze	$carry,$zero		# moved above
	b	.Lsqr8x_mul

.align	5
.Lsqr8x_break:
	$LD	$a0,$SIZE_T*8($rp)
	addi	$ap,$rp,$SIZE_T*15
	$LD	$a1,$SIZE_T*9($rp)
	sub.	$t0,$ap_end,$ap		# is it last iteration?
	$LD	$a2,$SIZE_T*10($rp)
	sub	$t1,$tp,$t0
	$LD	$a3,$SIZE_T*11($rp)
	$LD	$a4,$SIZE_T*12($rp)
	$LD	$a5,$SIZE_T*13($rp)
	$LD	$a6,$SIZE_T*14($rp)
	$LD	$a7,$SIZE_T*15($rp)
	beq	.Lsqr8x_outer_loop

	$ST	$acc0,$SIZE_T*1($tp)
	$LD	$acc0,$SIZE_T*1($t1)
	$ST	$acc1,$SIZE_T*2($tp)
	$LD	$acc1,$SIZE_T*2($t1)
	$ST	$acc2,$SIZE_T*3($tp)
	$LD	$acc2,$SIZE_T*3($t1)
	$ST	$acc3,$SIZE_T*4($tp)
	$LD	$acc3,$SIZE_T*4($t1)
	$ST	$acc4,$SIZE_T*5($tp)
	$LD	$acc4,$SIZE_T*5($t1)
	$ST	$acc5,$SIZE_T*6($tp)
	$LD	$acc5,$SIZE_T*6($t1)
	$ST	$acc6,$SIZE_T*7($tp)
	$LD	$acc6,$SIZE_T*7($t1)
	$ST	$acc7,$SIZE_T*8($tp)
	$LD	$acc7,$SIZE_T*8($t1)
	mr	$tp,$t1
	b	.Lsqr8x_outer_loop

.align	5
.Lsqr8x_outer_break:
	####################################################################
	# Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
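	#
	# In other words (illustrative only), with B = 2^$BITS, the
	# standard squaring decomposition being completed here is:
	#
	#	a^2 = 2*sum_{i<j} a[i]*a[j]*B^(i+j) + sum_i a[i]^2 * B^(2*i)
	#
	# The accumulated cross-products are doubled by the
	# shift-and-add pass below, with each a[i]^2 slotted into the
	# corresponding even/odd limb pair.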
	$LD	$a1,$SIZE_T*1($t0)	# recall that $t0 is &a[-1]
	$LD	$a3,$SIZE_T*2($t0)
	$LD	$a5,$SIZE_T*3($t0)
	$LD	$a7,$SIZE_T*4($t0)
	addi	$ap,$t0,$SIZE_T*4
					# "tp[x]" comments are for num==8 case
	$LD	$t1,$SIZE_T*13($sp)	# =tp[1], t[0] is not interesting
	$LD	$t2,$SIZE_T*14($sp)
	$LD	$t3,$SIZE_T*15($sp)
	$LD	$t0,$SIZE_T*16($sp)

	$ST	$acc0,$SIZE_T*1($tp)	# tp[8]=
	srwi	$cnt,$num,`log($SIZE_T)/log(2)+2`
	$ST	$acc1,$SIZE_T*2($tp)
	subi	$cnt,$cnt,1
	$ST	$acc2,$SIZE_T*3($tp)
	$ST	$acc3,$SIZE_T*4($tp)
	$ST	$acc4,$SIZE_T*5($tp)
	$ST	$acc5,$SIZE_T*6($tp)
	$ST	$acc6,$SIZE_T*7($tp)
	#$ST	$acc7,$SIZE_T*8($tp)	# tp[15] is not interesting
	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]
	$UMULL	$acc0,$a1,$a1
	$UMULH	$a1,$a1,$a1
	add	$acc1,$t1,$t1		# <<1
	$SHRI	$t1,$t1,$BITS-1
	$UMULL	$a2,$a3,$a3
	$UMULH	$a3,$a3,$a3
	addc	$acc1,$acc1,$a1
	add	$acc2,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	add	$acc3,$t3,$t3
	$SHRI	$t3,$t3,$BITS-1
	or	$acc2,$acc2,$t1

	mtctr	$cnt
.Lsqr4x_shift_n_add:
	$UMULL	$a4,$a5,$a5
	$UMULH	$a5,$a5,$a5
	$LD	$t1,$SIZE_T*6($tp)	# =tp[5]
	$LD	$a1,$SIZE_T*1($ap)
	adde	$acc2,$acc2,$a2
	add	$acc4,$t0,$t0
	$SHRI	$t0,$t0,$BITS-1
	or	$acc3,$acc3,$t2
	$LD	$t2,$SIZE_T*7($tp)	# =tp[6]
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*2($ap)
	add	$acc5,$t1,$t1
	$SHRI	$t1,$t1,$BITS-1
	or	$acc4,$acc4,$t3
	$LD	$t3,$SIZE_T*8($tp)	# =tp[7]
	$UMULL	$a6,$a7,$a7
	$UMULH	$a7,$a7,$a7
	adde	$acc4,$acc4,$a4
	add	$acc6,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	or	$acc5,$acc5,$t0
	$LD	$t0,$SIZE_T*9($tp)	# =tp[8]
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*3($ap)
	add	$acc7,$t3,$t3
	$SHRI	$t3,$t3,$BITS-1
	or	$acc6,$acc6,$t1
	$LD	$t1,$SIZE_T*10($tp)	# =tp[9]
	$UMULL	$a0,$a1,$a1
	$UMULH	$a1,$a1,$a1
	adde	$acc6,$acc6,$a6
	$ST	$acc0,$SIZE_T*1($tp)	# tp[0]=
	add	$acc0,$t0,$t0
	$SHRI	$t0,$t0,$BITS-1
	or	$acc7,$acc7,$t2
	$LD	$t2,$SIZE_T*11($tp)	# =tp[10]
	adde	$acc7,$acc7,$a7
	$LDU	$a7,$SIZE_T*4($ap)
	$ST	$acc1,$SIZE_T*2($tp)	# tp[1]=
	add	$acc1,$t1,$t1
	$SHRI	$t1,$t1,$BITS-1
	or	$acc0,$acc0,$t3
	$LD	$t3,$SIZE_T*12($tp)	# =tp[11]
	$UMULL	$a2,$a3,$a3
	$UMULH	$a3,$a3,$a3
	adde	$acc0,$acc0,$a0
	$ST	$acc2,$SIZE_T*3($tp)	# tp[2]=
	add	$acc2,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	or	$acc1,$acc1,$t0
	$LD	$t0,$SIZE_T*13($tp)	# =tp[12]
	adde	$acc1,$acc1,$a1
	$ST	$acc3,$SIZE_T*4($tp)	# tp[3]=
	$ST	$acc4,$SIZE_T*5($tp)	# tp[4]=
	$ST	$acc5,$SIZE_T*6($tp)	# tp[5]=
	$ST	$acc6,$SIZE_T*7($tp)	# tp[6]=
	$STU	$acc7,$SIZE_T*8($tp)	# tp[7]=
	add	$acc3,$t3,$t3
	$SHRI	$t3,$t3,$BITS-1
	or	$acc2,$acc2,$t1
	bdnz	.Lsqr4x_shift_n_add
___
my ($np,$np_end)=($ap,$ap_end);
$code.=<<___;
	$POP	$np,$SIZE_T*7($sp)	# pull &np[-1] and n0
	$POP	$n0,$SIZE_T*8($sp)

	$UMULL	$a4,$a5,$a5
	$UMULH	$a5,$a5,$a5
	$ST	$acc0,$SIZE_T*1($tp)	# tp[8]=
	$LD	$acc0,$SIZE_T*12($sp)	# =tp[0]
	$LD	$t1,$SIZE_T*6($tp)	# =tp[13]
	adde	$acc2,$acc2,$a2
	add	$acc4,$t0,$t0
	$SHRI	$t0,$t0,$BITS-1
	or	$acc3,$acc3,$t2
	$LD	$t2,$SIZE_T*7($tp)	# =tp[14]
	adde	$acc3,$acc3,$a3
	add	$acc5,$t1,$t1
	$SHRI	$t1,$t1,$BITS-1
	or	$acc4,$acc4,$t3
	$UMULL	$a6,$a7,$a7
	$UMULH	$a7,$a7,$a7
	adde	$acc4,$acc4,$a4
	add	$acc6,$t2,$t2
	$SHRI	$t2,$t2,$BITS-1
	or	$acc5,$acc5,$t0
	$ST	$acc1,$SIZE_T*2($tp)	# tp[9]=
	$LD	$acc1,$SIZE_T*13($sp)	# =tp[1]
	adde	$acc5,$acc5,$a5
	or	$acc6,$acc6,$t1
	$LD	$a0,$SIZE_T*1($np)
	$LD	$a1,$SIZE_T*2($np)
	adde	$acc6,$acc6,$a6
	$LD	$a2,$SIZE_T*3($np)
	$LD	$a3,$SIZE_T*4($np)
	adde	$acc7,$a7,$t2
	$LD	$a4,$SIZE_T*5($np)
	$LD	$a5,$SIZE_T*6($np)

	################################################################
	# Reduce by 8 limbs per iteration
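	#
	# Illustrative sketch only (not generated code): each pass
	# computes na0 = t[0]*n0 mod B, adds na0*n into t, and drops
	# the now-zero bottom limb, i.e. t = (t + na0*n)/B, so one
	# trip through the counted loop divides t by B^8 while keeping
	# it congruent to the original value modulo n.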
	$UMULL	$na0,$n0,$acc0		# t[0]*n0
	li	$cnt,8
	$LD	$a6,$SIZE_T*7($np)
	add	$np_end,$np,$num
	$LDU	$a7,$SIZE_T*8($np)
	$ST	$acc2,$SIZE_T*3($tp)	# tp[10]=
	$LD	$acc2,$SIZE_T*14($sp)
	$ST	$acc3,$SIZE_T*4($tp)	# tp[11]=
	$LD	$acc3,$SIZE_T*15($sp)
	$ST	$acc4,$SIZE_T*5($tp)	# tp[12]=
	$LD	$acc4,$SIZE_T*16($sp)
	$ST	$acc5,$SIZE_T*6($tp)	# tp[13]=
	$LD	$acc5,$SIZE_T*17($sp)
	$ST	$acc6,$SIZE_T*7($tp)	# tp[14]=
	$LD	$acc6,$SIZE_T*18($sp)
	$ST	$acc7,$SIZE_T*8($tp)	# tp[15]=
	$LD	$acc7,$SIZE_T*19($sp)
	addi	$tp,$sp,$SIZE_T*11	# &tp[-1]
	mtctr	$cnt
	b	.Lsqr8x_reduction

.align	5
.Lsqr8x_reduction:
	# (*)	$UMULL	$t0,$a0,$na0	# lo(n[0-7])*lo(t[0]*n0)
	$UMULL	$t1,$a1,$na0
	$UMULL	$t2,$a2,$na0
	$STU	$na0,$SIZE_T($tp)	# put aside t[0]*n0 for tail processing
	$UMULL	$t3,$a3,$na0
	# (*)	addc	$acc0,$acc0,$t0
	addic	$acc0,$acc0,-1		# (*)
	$UMULL	$t0,$a4,$na0
	adde	$acc0,$acc1,$t1
	$UMULL	$t1,$a5,$na0
	adde	$acc1,$acc2,$t2
	$UMULL	$t2,$a6,$na0
	adde	$acc2,$acc3,$t3
	$UMULL	$t3,$a7,$na0
	adde	$acc3,$acc4,$t0
	$UMULH	$t0,$a0,$na0		# hi(n[0-7])*lo(t[0]*n0)
	adde	$acc4,$acc5,$t1
	$UMULH	$t1,$a1,$na0
	adde	$acc5,$acc6,$t2
	$UMULH	$t2,$a2,$na0
	adde	$acc6,$acc7,$t3
	$UMULH	$t3,$a3,$na0
	addze	$acc7,$zero
	addc	$acc0,$acc0,$t0
	$UMULH	$t0,$a4,$na0
	adde	$acc1,$acc1,$t1
	$UMULH	$t1,$a5,$na0
	adde	$acc2,$acc2,$t2
	$UMULH	$t2,$a6,$na0
	adde	$acc3,$acc3,$t3
	$UMULH	$t3,$a7,$na0
	$UMULL	$na0,$n0,$acc0		# next t[0]*n0
	adde	$acc4,$acc4,$t0
	adde	$acc5,$acc5,$t1
	adde	$acc6,$acc6,$t2
	adde	$acc7,$acc7,$t3
	bdnz	.Lsqr8x_reduction

	$LD	$t0,$SIZE_T*1($tp)
	$LD	$t1,$SIZE_T*2($tp)
	$LD	$t2,$SIZE_T*3($tp)
	$LD	$t3,$SIZE_T*4($tp)
	subi	$rp,$tp,$SIZE_T*7
	$UCMP	$np_end,$np		# done yet?
	addc	$acc0,$acc0,$t0
	$LD	$t0,$SIZE_T*5($tp)
	adde	$acc1,$acc1,$t1
	$LD	$t1,$SIZE_T*6($tp)
	adde	$acc2,$acc2,$t2
	$LD	$t2,$SIZE_T*7($tp)
	adde	$acc3,$acc3,$t3
	$LD	$t3,$SIZE_T*8($tp)
	adde	$acc4,$acc4,$t0
	adde	$acc5,$acc5,$t1
	adde	$acc6,$acc6,$t2
	adde	$acc7,$acc7,$t3
	#addze	$carry,$zero		# moved below
	beq	.Lsqr8x8_post_condition

	$LD	$n0,$SIZE_T*0($rp)
	$LD	$a0,$SIZE_T*1($np)
	$LD	$a1,$SIZE_T*2($np)
	$LD	$a2,$SIZE_T*3($np)
	$LD	$a3,$SIZE_T*4($np)
	$LD	$a4,$SIZE_T*5($np)
	$LD	$a5,$SIZE_T*6($np)
	$LD	$a6,$SIZE_T*7($np)
	$LDU	$a7,$SIZE_T*8($np)
	li	$cnt,0

.align	5
.Lsqr8x_tail:
	$UMULL	$t0,$a0,$n0
	addze	$carry,$zero		# carry bit, modulo-scheduled
	$UMULL	$t1,$a1,$n0
	addi	$cnt,$cnt,$SIZE_T
	$UMULL	$t2,$a2,$n0
	andi.	$cnt,$cnt,$SIZE_T*8-1
	$UMULL	$t3,$a3,$n0
	addc	$acc0,$acc0,$t0
	$UMULL	$t0,$a4,$n0
	adde	$acc1,$acc1,$t1
	$UMULL	$t1,$a5,$n0
	adde	$acc2,$acc2,$t2
	$UMULL	$t2,$a6,$n0
	adde	$acc3,$acc3,$t3
	$UMULL	$t3,$a7,$n0
	adde	$acc4,$acc4,$t0
	$UMULH	$t0,$a0,$n0
	adde	$acc5,$acc5,$t1
	$UMULH	$t1,$a1,$n0
	adde	$acc6,$acc6,$t2
	$UMULH	$t2,$a2,$n0
	adde	$acc7,$acc7,$t3
	$UMULH	$t3,$a3,$n0
	addze	$carry,$carry
	$STU	$acc0,$SIZE_T($tp)
	addc	$acc0,$acc1,$t0
	$UMULH	$t0,$a4,$n0
	adde	$acc1,$acc2,$t1
	$UMULH	$t1,$a5,$n0
	adde	$acc2,$acc3,$t2
	$UMULH	$t2,$a6,$n0
	adde	$acc3,$acc4,$t3
	$UMULH	$t3,$a7,$n0
	$LDX	$n0,$rp,$cnt
	adde	$acc4,$acc5,$t0
	adde	$acc5,$acc6,$t1
	adde	$acc6,$acc7,$t2
	adde	$acc7,$carry,$t3
	#addze	$carry,$zero		# moved above
	bne	.Lsqr8x_tail
					# note that carry flag is guaranteed
					# to be zero at this point
	$LD	$a0,$SIZE_T*1($tp)
	$POP	$carry,$SIZE_T*10($sp)	# pull top-most carry in case we break
	$UCMP	$np_end,$np		# done yet?
	$LD	$a1,$SIZE_T*2($tp)
	sub	$t2,$np_end,$num	# rewound np
	$LD	$a2,$SIZE_T*3($tp)
	$LD	$a3,$SIZE_T*4($tp)
	$LD	$a4,$SIZE_T*5($tp)
	$LD	$a5,$SIZE_T*6($tp)
	$LD	$a6,$SIZE_T*7($tp)
	$LD	$a7,$SIZE_T*8($tp)
	beq	.Lsqr8x_tail_break

	addc	$acc0,$acc0,$a0
	$LD	$a0,$SIZE_T*1($np)
	adde	$acc1,$acc1,$a1
	$LD	$a1,$SIZE_T*2($np)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($np)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($np)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($np)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($np)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($np)
	adde	$acc7,$acc7,$a7
	$LDU	$a7,$SIZE_T*8($np)
	#addze	$carry,$zero		# moved above
	b	.Lsqr8x_tail

.align	5
.Lsqr8x_tail_break:
	$POP	$n0,$SIZE_T*8($sp)	# pull n0
	$POP	$t3,$SIZE_T*9($sp)	# &tp[2*num-1]
	addi	$cnt,$tp,$SIZE_T*8	# end of current t[num] window

	addic	$carry,$carry,-1	# "move" top-most carry to carry bit
	adde	$t0,$acc0,$a0
	$LD	$acc0,$SIZE_T*8($rp)
	$LD	$a0,$SIZE_T*1($t2)	# recall that $t2 is &n[-1]
	adde	$t1,$acc1,$a1
	$LD	$acc1,$SIZE_T*9($rp)
	$LD	$a1,$SIZE_T*2($t2)
	adde	$acc2,$acc2,$a2
	$LD	$a2,$SIZE_T*3($t2)
	adde	$acc3,$acc3,$a3
	$LD	$a3,$SIZE_T*4($t2)
	adde	$acc4,$acc4,$a4
	$LD	$a4,$SIZE_T*5($t2)
	adde	$acc5,$acc5,$a5
	$LD	$a5,$SIZE_T*6($t2)
	adde	$acc6,$acc6,$a6
	$LD	$a6,$SIZE_T*7($t2)
	adde	$acc7,$acc7,$a7
	$LD	$a7,$SIZE_T*8($t2)
	addi	$np,$t2,$SIZE_T*8
	addze	$t2,$zero		# top-most carry
	$UMULL	$na0,$n0,$acc0
	$ST	$t0,$SIZE_T*1($tp)
	$UCMP	$cnt,$t3		# did we hit the bottom?
	$ST	$t1,$SIZE_T*2($tp)
	li	$cnt,8
	$ST	$acc2,$SIZE_T*3($tp)
	$LD	$acc2,$SIZE_T*10($rp)
	$ST	$acc3,$SIZE_T*4($tp)
	$LD	$acc3,$SIZE_T*11($rp)
	$ST	$acc4,$SIZE_T*5($tp)
	$LD	$acc4,$SIZE_T*12($rp)
	$ST	$acc5,$SIZE_T*6($tp)
	$LD	$acc5,$SIZE_T*13($rp)
	$ST	$acc6,$SIZE_T*7($tp)
	$LD	$acc6,$SIZE_T*14($rp)
	$ST	$acc7,$SIZE_T*8($tp)
	$LD	$acc7,$SIZE_T*15($rp)
	$PUSH	$t2,$SIZE_T*10($sp)	# off-load top-most carry
	addi	$tp,$rp,$SIZE_T*7	# slide the window
	mtctr	$cnt
	bne	.Lsqr8x_reduction

	################################################################
	# Final step. We check whether the result is larger than the
	# modulus and, if it is, subtract the modulus. But comparison
	# itself implies subtraction, so we simply subtract the modulus,
	# see whether it borrowed, and conditionally copy the original
	# value back.
	$POP	$rp,$SIZE_T*6($sp)	# pull &rp[-1]
	srwi	$cnt,$num,`log($SIZE_T)/log(2)+3`
	mr	$n0,$tp			# put tp aside
	addi	$tp,$tp,$SIZE_T*8
	subi	$cnt,$cnt,1
	subfc	$t0,$a0,$acc0
	subfe	$t1,$a1,$acc1
	mr	$carry,$t2
	mr	$ap_end,$rp		# $rp copy

	mtctr	$cnt
	b	.Lsqr8x_sub

.align	5
.Lsqr8x_sub:
	$LD	$a0,$SIZE_T*1($np)
	$LD	$acc0,$SIZE_T*1($tp)
	$LD	$a1,$SIZE_T*2($np)
	$LD	$acc1,$SIZE_T*2($tp)
	subfe	$t2,$a2,$acc2
	$LD	$a2,$SIZE_T*3($np)
	$LD	$acc2,$SIZE_T*3($tp)
	subfe	$t3,$a3,$acc3
	$LD	$a3,$SIZE_T*4($np)
	$LD	$acc3,$SIZE_T*4($tp)
	$ST	$t0,$SIZE_T*1($rp)
	subfe	$t0,$a4,$acc4
	$LD	$a4,$SIZE_T*5($np)
	$LD	$acc4,$SIZE_T*5($tp)
	$ST	$t1,$SIZE_T*2($rp)
	subfe	$t1,$a5,$acc5
	$LD	$a5,$SIZE_T*6($np)
	$LD	$acc5,$SIZE_T*6($tp)
	$ST	$t2,$SIZE_T*3($rp)
	subfe	$t2,$a6,$acc6
	$LD	$a6,$SIZE_T*7($np)
	$LD	$acc6,$SIZE_T*7($tp)
	$ST	$t3,$SIZE_T*4($rp)
	subfe	$t3,$a7,$acc7
	$LDU	$a7,$SIZE_T*8($np)
	$LDU	$acc7,$SIZE_T*8($tp)
	$ST	$t0,$SIZE_T*5($rp)
	subfe	$t0,$a0,$acc0
	$ST	$t1,$SIZE_T*6($rp)
	subfe	$t1,$a1,$acc1
	$ST	$t2,$SIZE_T*7($rp)
	$STU	$t3,$SIZE_T*8($rp)
	bdnz	.Lsqr8x_sub

	srwi	$cnt,$num,`log($SIZE_T)/log(2)+2`
	$LD	$a0,$SIZE_T*1($ap_end)	# original $rp
	$LD	$acc0,$SIZE_T*1($n0)	# original $tp
	subi	$cnt,$cnt,1
	$LD	$a1,$SIZE_T*2($ap_end)
	$LD	$acc1,$SIZE_T*2($n0)
	subfe	$t2,$a2,$acc2
	$LD	$a2,$SIZE_T*3($ap_end)
	$LD	$acc2,$SIZE_T*3($n0)
	subfe	$t3,$a3,$acc3
	$LD	$a3,$SIZE_T*4($ap_end)
	$LDU	$acc3,$SIZE_T*4($n0)
	$ST	$t0,$SIZE_T*1($rp)
	subfe	$t0,$a4,$acc4
	$ST	$t1,$SIZE_T*2($rp)
	subfe	$t1,$a5,$acc5
	$ST	$t2,$SIZE_T*3($rp)
	subfe	$t2,$a6,$acc6
	$ST	$t3,$SIZE_T*4($rp)
	subfe	$t3,$a7,$acc7
	$ST	$t0,$SIZE_T*5($rp)
	subfe	$carry,$zero,$carry	# did it borrow?
	$ST	$t1,$SIZE_T*6($rp)
	$ST	$t2,$SIZE_T*7($rp)
	$ST	$t3,$SIZE_T*8($rp)

	addi	$tp,$sp,$SIZE_T*11
	mtctr	$cnt

.Lsqr4x_cond_copy:
	andc	$a0,$a0,$carry
	$ST	$zero,-$SIZE_T*3($n0)	# wipe stack clean
	and	$acc0,$acc0,$carry
	$ST	$zero,-$SIZE_T*2($n0)
	andc	$a1,$a1,$carry
	$ST	$zero,-$SIZE_T*1($n0)
	and	$acc1,$acc1,$carry
	$ST	$zero,-$SIZE_T*0($n0)
	andc	$a2,$a2,$carry
	$ST	$zero,$SIZE_T*1($tp)
	and	$acc2,$acc2,$carry
	$ST	$zero,$SIZE_T*2($tp)
	andc	$a3,$a3,$carry
	$ST	$zero,$SIZE_T*3($tp)
	and	$acc3,$acc3,$carry
	$STU	$zero,$SIZE_T*4($tp)
	or	$t0,$a0,$acc0
	$LD	$a0,$SIZE_T*5($ap_end)
	$LD	$acc0,$SIZE_T*1($n0)
	or	$t1,$a1,$acc1
	$LD	$a1,$SIZE_T*6($ap_end)
	$LD	$acc1,$SIZE_T*2($n0)
	or	$t2,$a2,$acc2
	$LD	$a2,$SIZE_T*7($ap_end)
	$LD	$acc2,$SIZE_T*3($n0)
	or	$t3,$a3,$acc3
	$LD	$a3,$SIZE_T*8($ap_end)
	$LDU	$acc3,$SIZE_T*4($n0)
	$ST	$t0,$SIZE_T*1($ap_end)
	$ST	$t1,$SIZE_T*2($ap_end)
	$ST	$t2,$SIZE_T*3($ap_end)
	$STU	$t3,$SIZE_T*4($ap_end)
	bdnz	.Lsqr4x_cond_copy

	$POP	$ap,0($sp)		# pull saved sp
	andc	$a0,$a0,$carry
	and	$acc0,$acc0,$carry
	andc	$a1,$a1,$carry
	and	$acc1,$acc1,$carry
	andc	$a2,$a2,$carry
	and	$acc2,$acc2,$carry
	andc	$a3,$a3,$carry
	and	$acc3,$acc3,$carry
	or	$t0,$a0,$acc0
	or	$t1,$a1,$acc1
	or	$t2,$a2,$acc2
	or	$t3,$a3,$acc3
	$ST	$t0,$SIZE_T*1($ap_end)
	$ST	$t1,$SIZE_T*2($ap_end)
	$ST	$t2,$SIZE_T*3($ap_end)
	$ST	$t3,$SIZE_T*4($ap_end)

	b	.Lsqr8x_done

.align	5
.Lsqr8x8_post_condition:
	$POP	$rp,$SIZE_T*6($sp)	# pull rp
	$POP	$ap,0($sp)		# pull saved sp
	addze	$carry,$zero

	# $acc0-7,$carry hold result, $a0-7 hold modulus
	subfc	$acc0,$a0,$acc0
	subfe	$acc1,$a1,$acc1
	$ST	$zero,$SIZE_T*12($sp)	# wipe stack clean
	$ST	$zero,$SIZE_T*13($sp)
	subfe	$acc2,$a2,$acc2
	$ST	$zero,$SIZE_T*14($sp)
	$ST	$zero,$SIZE_T*15($sp)
	subfe	$acc3,$a3,$acc3
	$ST	$zero,$SIZE_T*16($sp)
	$ST	$zero,$SIZE_T*17($sp)
	subfe	$acc4,$a4,$acc4
	$ST	$zero,$SIZE_T*18($sp)
	$ST	$zero,$SIZE_T*19($sp)
	subfe	$acc5,$a5,$acc5
	$ST	$zero,$SIZE_T*20($sp)
	$ST	$zero,$SIZE_T*21($sp)
	subfe	$acc6,$a6,$acc6
	$ST	$zero,$SIZE_T*22($sp)
	$ST	$zero,$SIZE_T*23($sp)
	subfe	$acc7,$a7,$acc7
	$ST	$zero,$SIZE_T*24($sp)
	$ST	$zero,$SIZE_T*25($sp)
	subfe	$carry,$zero,$carry	# did it borrow?
	$ST	$zero,$SIZE_T*26($sp)
	$ST	$zero,$SIZE_T*27($sp)

	and	$a0,$a0,$carry
	and	$a1,$a1,$carry
	addc	$acc0,$acc0,$a0		# add modulus back if borrowed
	and	$a2,$a2,$carry
	adde	$acc1,$acc1,$a1
	and	$a3,$a3,$carry
	adde	$acc2,$acc2,$a2
	and	$a4,$a4,$carry
	adde	$acc3,$acc3,$a3
	and	$a5,$a5,$carry
	adde	$acc4,$acc4,$a4
	and	$a6,$a6,$carry
	adde	$acc5,$acc5,$a5
	and	$a7,$a7,$carry
	adde	$acc6,$acc6,$a6
	adde	$acc7,$acc7,$a7
	$ST	$acc0,$SIZE_T*1($rp)
	$ST	$acc1,$SIZE_T*2($rp)
	$ST	$acc2,$SIZE_T*3($rp)
	$ST	$acc3,$SIZE_T*4($rp)
	$ST	$acc4,$SIZE_T*5($rp)
	$ST	$acc5,$SIZE_T*6($rp)
	$ST	$acc6,$SIZE_T*7($rp)
	$ST	$acc7,$SIZE_T*8($rp)

.Lsqr8x_done:
	$PUSH	$zero,$SIZE_T*8($sp)
	$PUSH	$zero,$SIZE_T*10($sp)

	$POP	r14,-$SIZE_T*18($ap)
	li	r3,1			# signal "done"
	$POP	r15,-$SIZE_T*17($ap)
	$POP	r16,-$SIZE_T*16($ap)
	$POP	r17,-$SIZE_T*15($ap)
	$POP	r18,-$SIZE_T*14($ap)
	$POP	r19,-$SIZE_T*13($ap)
	$POP	r20,-$SIZE_T*12($ap)
	$POP	r21,-$SIZE_T*11($ap)
	$POP	r22,-$SIZE_T*10($ap)
	$POP	r23,-$SIZE_T*9($ap)
	$POP	r24,-$SIZE_T*8($ap)
	$POP	r25,-$SIZE_T*7($ap)
	$POP	r26,-$SIZE_T*6($ap)
	$POP	r27,-$SIZE_T*5($ap)
	$POP	r28,-$SIZE_T*4($ap)
	$POP	r29,-$SIZE_T*3($ap)
	$POP	r30,-$SIZE_T*2($ap)
	$POP	r31,-$SIZE_T*1($ap)
	mr	$sp,$ap
	blr
	.long	0
	.byte	0,12,4,0x20,0x80,18,6,0
	.long	0
.size	__bn_sqr8x_mont,.-__bn_sqr8x_mont
___
}
$code.=<<___;
.asciz	"Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT or die "error closing STDOUT: $!";