#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================

# April 2006

11 # "Teaser" Montgomery multiplication module for PowerPC. It's possible
12 # to gain a bit more by modulo-scheduling outer loop, then dedicated
13 # squaring procedure should give further 20% and code can be adapted
14 # for 32-bit application running on 64-bit CPU. As for the latter.
15 # It won't be able to achieve "native" 64-bit performance, because in
16 # 32-bit application context every addc instruction will have to be
17 # expanded as addc, twice right shift by 32 and finally adde, etc.
18 # So far RSA *sign* performance improvement over pre-bn_mul_mont asm
19 # for 64-bit application running on PPC970/G5 is:
#
#	512-bit		+65%
#	1024-bit	+35%
#	2048-bit	+18%
#	4096-bit	+4%

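# Summary of what bn_mul_mont computes (standard Montgomery product):
# rp[] = ap[]*bp[]*2^(-BITS*num) mod np[], one word of bp[] at a time.
# For every bp[i] the partial product ap[]*bp[i] is accumulated into
# the scratch vector tp[], then m1 is chosen (via the caller-supplied
# n0 = -np[0]^(-1) mod 2^BITS) so that the updated tp[] plus np[]*m1
# is divisible by 2^BITS, and the accumulator is shifted down by one
# word. A final conditional subtraction of np[] keeps the result below
# the modulus.
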
$output = shift;

if ($output =~ /32\-mont\.s/) {
	$BITS=	32;
	$BNSZ=	$BITS/8;
	$SIZE_T=4;
	$RZONE=	224;
	$FRAME=	$SIZE_T*16;

	$LD=	"lwz";		# load
	$LDU=	"lwzu";		# load and update
	$LDX=	"lwzx";		# load indexed
	$ST=	"stw";		# store
	$STU=	"stwu";		# store and update
	$STX=	"stwx";		# store indexed
	$STUX=	"stwux";	# store indexed and update
	$UMULL=	"mullw";	# unsigned multiply low
	$UMULH=	"mulhwu";	# unsigned multiply high
	$UCMP=	"cmplw";	# unsigned compare
	$PUSH=	$ST;
	$POP=	$LD;
} elsif ($output =~ /64\-mont\.s/) {
	$BITS=	64;
	$BNSZ=	$BITS/8;
	$SIZE_T=8;
	$RZONE=	288;
	$FRAME=	$SIZE_T*16;

	# same as above, but 64-bit mnemonics...
	$LD=	"ld";		# load
	$LDU=	"ldu";		# load and update
	$LDX=	"ldx";		# load indexed
	$ST=	"std";		# store
	$STU=	"stdu";		# store and update
	$STX=	"stdx";		# store indexed
	$STUX=	"stdux";	# store indexed and update
	$UMULL=	"mulld";	# unsigned multiply low
	$UMULH=	"mulhdu";	# unsigned multiply high
	$UCMP=	"cmpld";	# unsigned compare
	$PUSH=	$ST;
	$POP=	$LD;
} else { die "nonsense $output"; }

( defined shift || open STDOUT,"| $^X ../perlasm/ppc-xlate.pl $output" ) ||
	die "can't call ../perlasm/ppc-xlate.pl: $!";

$sp="r1";
$toc="r2";
$rp="r3";	$ovf="r3";
$ap="r4";
$bp="r5";
$np="r6";
$n0="r7";
$num="r8";
$rp="r9";	# $rp is reassigned
$aj="r10";
$nj="r11";
$tj="r12";
# non-volatile registers
$i="r14";
$j="r15";
$tp="r16";
$m0="r17";
$m1="r18";
$lo0="r19";
$hi0="r20";
$lo1="r21";
$hi1="r22";
$alo="r23";
$ahi="r24";
$nlo="r25";
#
$nhi="r0";

$code=<<___;
.machine	"any"
.text

.globl	.bn_mul_mont
.align	4
.bn_mul_mont:
	cmpwi	$num,4
	mr	$rp,r3		; $rp is reassigned
	li	r3,0
	bltlr

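	; carve out a stack frame holding the register save area, the
	; tp[num] scratch vector and the caller's red zone, with the new
	; stack pointer rounded down to a 4KB boundary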
	slwi	$num,$num,`log($BNSZ)/log(2)`
	li	$tj,-4096
	addi	$ovf,$num,`$FRAME+$RZONE`
	subf	$ovf,$ovf,$sp	; $sp-$ovf
	and	$ovf,$ovf,$tj	; minimize TLB usage
	subf	$ovf,$sp,$ovf	; $ovf-$sp
	srwi	$num,$num,`log($BNSZ)/log(2)`
	$STUX	$sp,$sp,$ovf

	$PUSH	r14,`4*$SIZE_T`($sp)
	$PUSH	r15,`5*$SIZE_T`($sp)
	$PUSH	r16,`6*$SIZE_T`($sp)
	$PUSH	r17,`7*$SIZE_T`($sp)
	$PUSH	r18,`8*$SIZE_T`($sp)
	$PUSH	r19,`9*$SIZE_T`($sp)
	$PUSH	r20,`10*$SIZE_T`($sp)
	$PUSH	r21,`11*$SIZE_T`($sp)
	$PUSH	r22,`12*$SIZE_T`($sp)
	$PUSH	r23,`13*$SIZE_T`($sp)
	$PUSH	r24,`14*$SIZE_T`($sp)
	$PUSH	r25,`15*$SIZE_T`($sp)

	$LD	$n0,0($n0)	; pull n0[0] value
	addi	$num,$num,-2	; adjust $num for counter register

	$LD	$m0,0($bp)	; m0=bp[0]
	$LD	$aj,0($ap)	; ap[0]
	addi	$tp,$sp,$FRAME
	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[0]
	$UMULH	$hi0,$aj,$m0

	$LD	$aj,$BNSZ($ap)	; ap[1]
	$LD	$nj,0($np)	; np[0]

	$UMULL	$m1,$lo0,$n0	; "tp[0]"*n0

	$UMULL	$alo,$aj,$m0	; ap[1]*bp[0]
	$UMULH	$ahi,$aj,$m0

	$UMULL	$lo1,$nj,$m1	; np[0]*m1
	$UMULH	$hi1,$nj,$m1
	$LD	$nj,$BNSZ($np)	; np[1]
	addc	$lo1,$lo1,$lo0
	addze	$hi1,$hi1

	$UMULL	$nlo,$nj,$m1	; np[1]*m1
	$UMULH	$nhi,$nj,$m1

	mtctr	$num
	li	$j,`2*$BNSZ`
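	; first pass (i=0): tp[] = ap[]*bp[0] + np[]*m1, carries kept in
	; $hi0/$hi1, result stored shifted down by one word (the low
	; word, divisible by 2^BITS by construction of m1, is dropped)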
.align	4
L1st:
	$LDX	$aj,$ap,$j	; ap[j]
	$LDX	$nj,$np,$j	; np[j]
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[0]
	$UMULH	$ahi,$aj,$m0

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	$UMULL	$nlo,$nj,$m1	; np[j]*m1
	$UMULH	$nhi,$nj,$m1
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addi	$j,$j,$BNSZ	; j++
	addi	$tp,$tp,$BNSZ	; tp++
	bdnz-	L1st
;L1st
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[0]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	li	$ovf,0
	addc	$hi1,$hi1,$hi0
	addze	$ovf,$ovf	; upmost overflow bit
	$ST	$hi1,$BNSZ($tp)

	li	$i,$BNSZ
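	; outer loop over the remaining words bp[1..num-1]: for each
	; bp[i], tp[] = (tp[] + ap[]*bp[i] + np[]*m1)/2^BITS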
.align	4
Louter:
	$LDX	$m0,$bp,$i	; m0=bp[i]
	$LD	$aj,0($ap)	; ap[0]
	addi	$tp,$sp,$FRAME
	$LD	$tj,$FRAME($sp)	; tp[0]
	$UMULL	$lo0,$aj,$m0	; ap[0]*bp[i]
	$UMULH	$hi0,$aj,$m0
	$LD	$aj,$BNSZ($ap)	; ap[1]
	$LD	$nj,0($np)	; np[0]
	addc	$lo0,$lo0,$tj	; ap[0]*bp[i]+tp[0]
	addze	$hi0,$hi0

	$UMULL	$m1,$lo0,$n0	; tp[0]*n0

	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
	$UMULH	$ahi,$aj,$m0

	$UMULL	$lo1,$nj,$m1	; np[0]*m1
	$UMULH	$hi1,$nj,$m1
	$LD	$nj,$BNSZ($np)	; np[1]
	addc	$lo1,$lo1,$lo0
	addze	$hi1,$hi1

	$UMULL	$nlo,$nj,$m1	; np[1]*m1
	$UMULH	$nhi,$nj,$m1

	mtctr	$num
	li	$j,`2*$BNSZ`
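	; inner loop: tp[j] = ap[j]*bp[i] + np[j]*m1 + tp[j], carries in
	; $hi0/$hi1, result stored shifted down by one word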
.align	4
Linner:
	$LDX	$aj,$ap,$j	; ap[j]
	$LD	$tj,$BNSZ($tp)	; tp[j]
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi
	$LDX	$nj,$np,$j	; np[j]
	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
	addze	$hi0,$hi0
	$UMULL	$alo,$aj,$m0	; ap[j]*bp[i]
	$UMULH	$ahi,$aj,$m0

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	$UMULL	$nlo,$nj,$m1	; np[j]*m1
	$UMULH	$nhi,$nj,$m1
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addi	$j,$j,$BNSZ	; j++
	addi	$tp,$tp,$BNSZ	; tp++
	bdnz-	Linner
;Linner
	$LD	$tj,$BNSZ($tp)	; tp[j]
	addc	$lo0,$alo,$hi0
	addze	$hi0,$ahi
	addc	$lo0,$lo0,$tj	; ap[j]*bp[i]+tp[j]
	addze	$hi0,$hi0

	addc	$lo1,$nlo,$hi1
	addze	$hi1,$nhi
	addc	$lo1,$lo1,$lo0	; np[j]*m1+ap[j]*bp[i]+tp[j]
	addze	$hi1,$hi1
	$ST	$lo1,0($tp)	; tp[j-1]

	addic	$ovf,$ovf,-1	; move upmost overflow to XER[CA]
	li	$ovf,0
	adde	$hi1,$hi1,$hi0
	addze	$ovf,$ovf
	$ST	$hi1,$BNSZ($tp)
;
	slwi	$tj,$num,`log($BNSZ)/log(2)`
	$UCMP	$i,$tj
	addi	$i,$i,$BNSZ
	ble-	Louter

	addi	$num,$num,2	; restore $num
	addi	$tp,$sp,$FRAME
	mtctr	$num
	li	$j,0

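	; final reduction: attempt rp[]=tp[]-np[] (Lsub) if the result
	; overflowed or its top word is >= the top word of np[]; if the
	; subtraction borrows with no overflow word to absorb it, fall
	; back to copying tp[] to rp[] (Lcopy); the tp[] scratch area is
	; zeroed on either path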
	subfc.	$ovf,$j,$ovf	; sets XER[CA]
	bne	Lsub
	$UCMP	$hi1,$nj
	bge	Lsub
.align	4
Lcopy:
	$LDX	$tj,$tp,$j
	$STX	$tj,$rp,$j
	$STX	$j,$tp,$j	; zap at once
	addi	$j,$j,$BNSZ
	bdnz-	Lcopy

Lexit:
	$POP	r14,`4*$SIZE_T`($sp)
	$POP	r15,`5*$SIZE_T`($sp)
	$POP	r16,`6*$SIZE_T`($sp)
	$POP	r17,`7*$SIZE_T`($sp)
	$POP	r18,`8*$SIZE_T`($sp)
	$POP	r19,`9*$SIZE_T`($sp)
	$POP	r20,`10*$SIZE_T`($sp)
	$POP	r21,`11*$SIZE_T`($sp)
	$POP	r22,`12*$SIZE_T`($sp)
	$POP	r23,`13*$SIZE_T`($sp)
	$POP	r24,`14*$SIZE_T`($sp)
	$POP	r25,`15*$SIZE_T`($sp)
	$POP	$sp,0($sp)
	li	r3,1
	blr
	.long	0
.align	4
Lsub:	$LDX	$tj,$tp,$j
	$LDX	$nj,$np,$j
	subfe	$tj,$nj,$tj	; tp[j]-np[j]
	$STX	$tj,$rp,$j
	addi	$j,$j,$BNSZ
	bdnz-	Lsub
	li	$j,0
	subfe.	$ovf,$j,$ovf
	mtctr	$num
	bne	Lcopy
.align	4
Lzap:	$STX	$j,$tp,$j
	addi	$j,$j,$BNSZ
	bdnz-	Lzap
	b	Lexit
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;