#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# March 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+128-byte shared
# table]. Performance results are for the streamed GHASH subroutine
# on a pre-Tx UltraSPARC CPU and are expressed in cycles per
# processed byte, lower is better:
#
#                gcc 3.3.x    cc 5.2     this assembler
#
# 32-bit build   81.0         48.6       11.8    (+586%/+311%)
# 64-bit build   27.5         20.3       11.8    (+133%/+72%)
#
# I don't quite understand why the difference between 32-bit and
# 64-bit compiler-generated code is so big. The compilers *were*
# instructed to generate code for UltraSPARC and should have used
# 64-bit registers for the Z vector (see the C code) even in the
# 32-bit build... Oh well, it only means more impressive improvement
# coefficients for this assembler module;-) Loops are aggressively
# modulo-scheduled with respect to references to input data and Z.hi
# updates in order to achieve the 12-cycle timing. For reference,
# sha1-sparcv9.pl spends 11.6 cycles per processed byte [on the same
# pre-Tx UltraSPARC CPU].

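# For orientation, one step of the "4-bit" table algorithm that the
# assembler loops below implement looks roughly like the following
# plain-Perl sketch. It is illustrative only: it is never called by
# this script, it assumes a perl built with 64-bit integers, and the
# [hi,lo] layout of the hypothetical $htable/$remtab arguments merely
# mirrors the Htable and rem_4bit tables used by the generated code.
sub _ghash_nibble_step {                        # illustrative only
    my ($zhi,$zlo,$nibble,$htable,$remtab) = @_;
    my $rem = $zlo & 0xf;                       # bits about to fall off Z.lo
    $zlo = (($zhi & 0xf) << 60) | ($zlo >> 4);  # Z >>= 4 as a 128-bit value
    $zhi = $zhi >> 4;
    $zhi ^= $remtab->[$rem];                    # fold the dropped bits back in
    $zhi ^= $htable->[$nibble][0];              # Z ^= H * nibble
    $zlo ^= $htable->[$nibble][1];
    return ($zhi,$zlo);
}
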
$bits=32;
for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
if ($bits==64) { $bias=2047; $frame=192; }      # SPARC V9 ABI stack bias is 2047
else           { $bias=0;    $frame=112; }

$output=shift;
open STDOUT,">$output" or die "can't open $output: $!";

$Zhi="%o0";     # 64-bit values
$Zlo="%o1";
$Thi="%o2";
$Tlo="%o3";
$rem="%o4";
$tmp="%o5";

$nhi="%l0";     # small values and pointers
$nlo="%l1";
$xi0="%l2";
$xi1="%l3";
$rem_4bit="%l4";
$remi="%l5";
$Htblo="%l6";
$cnt="%l7";

$Xi="%i0";      # input argument block
$Htbl="%i1";
$inp="%i2";
$len="%i3";

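# The rem_4bit table emitted below holds, for each 4-bit value that can
# be shifted out of Z.lo, the reduction contribution that must be folded
# into Z.hi. Each entry is a 16-bit constant placed in the top bits of a
# big-endian 64-bit word (hence the <<16 in the .long pairs, read back
# with ldx), and derives from GCM's reflected reduction constant 0xE1
# (x^128 + x^7 + x^2 + x + 1). The helper below is an illustrative
# sketch of that derivation; it is never called by this script and
# merely reproduces the constants hard-coded further down.
sub _rem_4bit_constants {               # illustrative only
    my @tab;
    for my $r (0..15) {
        my $v = 0;
        for my $j (0..3) {
            # each dropped bit j folds in 0xE100 shifted right by (3-j)
            $v ^= (0xE100 >> (3-$j)) if (($r >> $j) & 1);
        }
        push @tab, $v;                  # 0x0000,0x1C20,0x3840,0x2460,...
    }
    return @tab;
}
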
$code.=<<___;
.section ".text",#alloc,#execinstr

.align  64
rem_4bit:
        .long   `0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`,0
        .long   `0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`,0
        .long   `0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`,0
        .long   `0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`,0
.type   rem_4bit,#object
.size   rem_4bit,(.-rem_4bit)

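! gcm_ghash_4bit(Xi, Htable, inp, len): streamed GHASH over len bytes at
! inp, where Xi is the 16-byte hash value and Htable the 256-byte per-key
! table (cf. gcm128.c); the arguments arrive in %i0..%i3 per the register
! assignments above.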
.globl  gcm_ghash_4bit
.align  32
gcm_ghash_4bit:
        save    %sp,-$frame,%sp
        ldub    [$inp+15],$nlo
        ldub    [$Xi+15],$xi0
        ldub    [$Xi+14],$xi1
        add     $len,$inp,$len
        add     $Htbl,8,$Htblo

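! load the address of rem_4bit PC-relatively: the call writes its own
! address into %o7 and the delay-slot add applies the assemble-time offset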
1:      call    .+8
        add     %o7,rem_4bit-1b,$rem_4bit

.Louter:
        xor     $xi0,$nlo,$nlo
        and     $nlo,0xf0,$nhi
        and     $nlo,0x0f,$nlo
        sll     $nlo,4,$nlo
        ldx     [$Htblo+$nlo],$Zlo
        ldx     [$Htbl+$nlo],$Zhi

        ldub    [$inp+14],$nlo

        ldx     [$Htblo+$nhi],$Tlo
        and     $Zlo,0xf,$remi
        ldx     [$Htbl+$nhi],$Thi
        sll     $remi,3,$remi
        ldx     [$rem_4bit+$remi],$rem
        srlx    $Zlo,4,$Zlo
        mov     13,$cnt                 ! bytes 13..0 remain for the inner loop
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo

        xor     $xi1,$nlo,$nlo
        and     $Zlo,0xf,$remi
        and     $nlo,0xf0,$nhi
        and     $nlo,0x0f,$nlo
        ba      .Lghash_inner
        sll     $nlo,4,$nlo
.align  32
.Lghash_inner:
        ldx     [$Htblo+$nlo],$Tlo
        sll     $remi,3,$remi
        xor     $Thi,$Zhi,$Zhi
        ldx     [$Htbl+$nlo],$Thi
        srlx    $Zlo,4,$Zlo
        xor     $rem,$Zhi,$Zhi
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        ldub    [$inp+$cnt],$nlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        ldub    [$Xi+$cnt],$xi1
        xor     $Thi,$Zhi,$Zhi
        and     $Zlo,0xf,$remi

        ldx     [$Htblo+$nhi],$Tlo
        sll     $remi,3,$remi
        xor     $rem,$Zhi,$Zhi
        ldx     [$Htbl+$nhi],$Thi
        srlx    $Zlo,4,$Zlo
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $xi1,$nlo,$nlo
        srlx    $Zhi,4,$Zhi
        and     $nlo,0xf0,$nhi
        addcc   $cnt,-1,$cnt
        xor     $Zlo,$tmp,$Zlo
        and     $nlo,0x0f,$nlo
        xor     $Tlo,$Zlo,$Zlo
        sll     $nlo,4,$nlo
        blu     .Lghash_inner
        and     $Zlo,0xf,$remi

        ldx     [$Htblo+$nlo],$Tlo
        sll     $remi,3,$remi
        xor     $Thi,$Zhi,$Zhi
        ldx     [$Htbl+$nlo],$Thi
        srlx    $Zlo,4,$Zlo
        xor     $rem,$Zhi,$Zhi
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        xor     $Thi,$Zhi,$Zhi

        add     $inp,16,$inp
        cmp     $inp,$len
        be,pn   `$bits==64?"%xcc":"%icc"`,.Ldone
        and     $Zlo,0xf,$remi

        ldx     [$Htblo+$nhi],$Tlo
        sll     $remi,3,$remi
        xor     $rem,$Zhi,$Zhi
        ldx     [$Htbl+$nhi],$Thi
        srlx    $Zlo,4,$Zlo
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        ldub    [$inp+15],$nlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        xor     $Thi,$Zhi,$Zhi
        stx     $Zlo,[$Xi+8]
        xor     $rem,$Zhi,$Zhi
        stx     $Zhi,[$Xi]
        srl     $Zlo,8,$xi1
        and     $Zlo,0xff,$xi0
        ba      .Louter
        and     $xi1,0xff,$xi1
.align  32
.Ldone:
        ldx     [$Htblo+$nhi],$Tlo
        sll     $remi,3,$remi
        xor     $rem,$Zhi,$Zhi
        ldx     [$Htbl+$nhi],$Thi
        srlx    $Zlo,4,$Zlo
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        xor     $Thi,$Zhi,$Zhi
        stx     $Zlo,[$Xi+8]
        xor     $rem,$Zhi,$Zhi
        stx     $Zhi,[$Xi]

        ret
        restore
.type   gcm_ghash_4bit,#function
.size   gcm_ghash_4bit,(.-gcm_ghash_4bit)
___

undef $inp;             # not arguments of gcm_gmult_4bit
undef $len;

$code.=<<___;
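! gcm_gmult_4bit(Xi, Htable): the underlying single multiplication
! Xi := Xi*H in GF(2^128); same 4-bit table walk as gcm_ghash_4bit,
! but with no input data folded in. Xi arrives in %i0, Htable in %i1.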
.globl  gcm_gmult_4bit
.align  32
gcm_gmult_4bit:
        save    %sp,-$frame,%sp
        ldub    [$Xi+15],$nlo
        add     $Htbl,8,$Htblo

1:      call    .+8
        add     %o7,rem_4bit-1b,$rem_4bit

        and     $nlo,0xf0,$nhi
        and     $nlo,0x0f,$nlo
        sll     $nlo,4,$nlo
        ldx     [$Htblo+$nlo],$Zlo
        ldx     [$Htbl+$nlo],$Zhi

        ldub    [$Xi+14],$nlo

        ldx     [$Htblo+$nhi],$Tlo
        and     $Zlo,0xf,$remi
        ldx     [$Htbl+$nhi],$Thi
        sll     $remi,3,$remi
        ldx     [$rem_4bit+$remi],$rem
        srlx    $Zlo,4,$Zlo
        mov     13,$cnt
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo

        and     $Zlo,0xf,$remi
        and     $nlo,0xf0,$nhi
        and     $nlo,0x0f,$nlo
        ba      .Lgmult_inner
        sll     $nlo,4,$nlo
.align  32
.Lgmult_inner:
        ldx     [$Htblo+$nlo],$Tlo
        sll     $remi,3,$remi
        xor     $Thi,$Zhi,$Zhi
        ldx     [$Htbl+$nlo],$Thi
        srlx    $Zlo,4,$Zlo
        xor     $rem,$Zhi,$Zhi
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        ldub    [$Xi+$cnt],$nlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        xor     $Thi,$Zhi,$Zhi
        and     $Zlo,0xf,$remi

        ldx     [$Htblo+$nhi],$Tlo
        sll     $remi,3,$remi
        xor     $rem,$Zhi,$Zhi
        ldx     [$Htbl+$nhi],$Thi
        srlx    $Zlo,4,$Zlo
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        srlx    $Zhi,4,$Zhi
        and     $nlo,0xf0,$nhi
        addcc   $cnt,-1,$cnt
        xor     $Zlo,$tmp,$Zlo
        and     $nlo,0x0f,$nlo
        xor     $Tlo,$Zlo,$Zlo
        sll     $nlo,4,$nlo
        blu     .Lgmult_inner
        and     $Zlo,0xf,$remi

        ldx     [$Htblo+$nlo],$Tlo
        sll     $remi,3,$remi
        xor     $Thi,$Zhi,$Zhi
        ldx     [$Htbl+$nlo],$Thi
        srlx    $Zlo,4,$Zlo
        xor     $rem,$Zhi,$Zhi
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        xor     $Thi,$Zhi,$Zhi
        and     $Zlo,0xf,$remi

        ldx     [$Htblo+$nhi],$Tlo
        sll     $remi,3,$remi
        xor     $rem,$Zhi,$Zhi
        ldx     [$Htbl+$nhi],$Thi
        srlx    $Zlo,4,$Zlo
        ldx     [$rem_4bit+$remi],$rem
        sllx    $Zhi,60,$tmp
        xor     $Tlo,$Zlo,$Zlo
        srlx    $Zhi,4,$Zhi
        xor     $Zlo,$tmp,$Zlo
        xor     $Thi,$Zhi,$Zhi
        stx     $Zlo,[$Xi+8]
        xor     $rem,$Zhi,$Zhi
        stx     $Zhi,[$Xi]

        ret
        restore
.type   gcm_gmult_4bit,#function
.size   gcm_gmult_4bit,(.-gcm_gmult_4bit)
.asciz  "GHASH for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
close STDOUT;