1 #! /usr/bin/env perl
2 # Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # October 2015
18 #
19 # ChaCha20 for PowerPC/AltiVec.
20 #
21 # Performance in cycles per byte out of a large buffer.
22 #
23 # IALU/gcc-4.x 3xAltiVec+1xIALU
24 #
25 # Freescale e300 13.6/+115% -
26 # PPC74x0/G4e 6.81/+310% 3.72
27 # PPC970/G5 9.29/+160% ?
28 # POWER7 8.62/+61% 3.38
29 # POWER8 8.70/+51% 3.36
30 # POWER9 8.80/+29% 4.50(*)
31 #
32 # (*)   this is a trade-off result; it is possible to improve it, but
33 #       doing so would negatively affect all the other results.
34
35 $flavour = shift;
36
37 if ($flavour =~ /64/) {
38 $SIZE_T =8;
39 $LRSAVE =2*$SIZE_T;
40 $STU ="stdu";
41 $POP ="ld";
42 $PUSH ="std";
43 $UCMP ="cmpld";
44 } elsif ($flavour =~ /32/) {
45 $SIZE_T =4;
46 $LRSAVE =$SIZE_T;
47 $STU ="stwu";
48 $POP ="lwz";
49 $PUSH ="stw";
50 $UCMP ="cmplw";
51 } else { die "nonsense $flavour"; }
52
53 $LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
54
55 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
56 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
57 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
58 die "can't locate ppc-xlate.pl";
59
60 open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
61
62 $LOCALS=6*$SIZE_T;
63 $FRAME=$LOCALS+64+18*$SIZE_T;        # 64 is for local variables (the key-stream block stashed in Ltail)
64
65 sub AUTOLOAD() # thunk [simplified] x86-style perlasm
66 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
67 $code .= "\t$opcode\t".join(',',@_)."\n";
68 }
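# Any call to a sub that is not defined in this file (e.g. the &add, &xor and
# &rotlwi used by ROUND() below) falls through to this AUTOLOAD, which simply
# appends the call as an instruction line to $code, mapping '_' in the name
# to '.'.  For instance, &add("r16","r16","r20") appends "\tadd\tr16,r16,r20\n".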
69
70 my $sp = "r1";
71
72 my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
73
74 my @x=map("r$_",(16..31));
75 my @d=map("r$_",(11,12,14,15));
76 my @t=map("r$_",(7..10));
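# Register map for the integer path: @x[0..15] hold the sixteen 32-bit words
# of the ChaCha state, @d[0..3] keep a copy of the 16-byte counter/nonce block
# loaded from $ctr, and @t[0..3] are scratch.  Note that @t[0] is r7 and so
# aliases $ctr, which is safe because the counter is copied out before @t is
# first written.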
77
78 sub ROUND {
79 my ($a0,$b0,$c0,$d0)=@_;
80 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
81 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
82 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
83
84 (
85 "&add (@x[$a0],@x[$a0],@x[$b0])",
86 "&add (@x[$a1],@x[$a1],@x[$b1])",
87 "&add (@x[$a2],@x[$a2],@x[$b2])",
88 "&add (@x[$a3],@x[$a3],@x[$b3])",
89 "&xor (@x[$d0],@x[$d0],@x[$a0])",
90 "&xor (@x[$d1],@x[$d1],@x[$a1])",
91 "&xor (@x[$d2],@x[$d2],@x[$a2])",
92 "&xor (@x[$d3],@x[$d3],@x[$a3])",
93 "&rotlwi (@x[$d0],@x[$d0],16)",
94 "&rotlwi (@x[$d1],@x[$d1],16)",
95 "&rotlwi (@x[$d2],@x[$d2],16)",
96 "&rotlwi (@x[$d3],@x[$d3],16)",
97
98 "&add (@x[$c0],@x[$c0],@x[$d0])",
99 "&add (@x[$c1],@x[$c1],@x[$d1])",
100 "&add (@x[$c2],@x[$c2],@x[$d2])",
101 "&add (@x[$c3],@x[$c3],@x[$d3])",
102 "&xor (@x[$b0],@x[$b0],@x[$c0])",
103 "&xor (@x[$b1],@x[$b1],@x[$c1])",
104 "&xor (@x[$b2],@x[$b2],@x[$c2])",
105 "&xor (@x[$b3],@x[$b3],@x[$c3])",
106 "&rotlwi (@x[$b0],@x[$b0],12)",
107 "&rotlwi (@x[$b1],@x[$b1],12)",
108 "&rotlwi (@x[$b2],@x[$b2],12)",
109 "&rotlwi (@x[$b3],@x[$b3],12)",
110
111 "&add (@x[$a0],@x[$a0],@x[$b0])",
112 "&add (@x[$a1],@x[$a1],@x[$b1])",
113 "&add (@x[$a2],@x[$a2],@x[$b2])",
114 "&add (@x[$a3],@x[$a3],@x[$b3])",
115 "&xor (@x[$d0],@x[$d0],@x[$a0])",
116 "&xor (@x[$d1],@x[$d1],@x[$a1])",
117 "&xor (@x[$d2],@x[$d2],@x[$a2])",
118 "&xor (@x[$d3],@x[$d3],@x[$a3])",
119 "&rotlwi (@x[$d0],@x[$d0],8)",
120 "&rotlwi (@x[$d1],@x[$d1],8)",
121 "&rotlwi (@x[$d2],@x[$d2],8)",
122 "&rotlwi (@x[$d3],@x[$d3],8)",
123
124 "&add (@x[$c0],@x[$c0],@x[$d0])",
125 "&add (@x[$c1],@x[$c1],@x[$d1])",
126 "&add (@x[$c2],@x[$c2],@x[$d2])",
127 "&add (@x[$c3],@x[$c3],@x[$d3])",
128 "&xor (@x[$b0],@x[$b0],@x[$c0])",
129 "&xor (@x[$b1],@x[$b1],@x[$c1])",
130 "&xor (@x[$b2],@x[$b2],@x[$c2])",
131 "&xor (@x[$b3],@x[$b3],@x[$c3])",
132 "&rotlwi (@x[$b0],@x[$b0],7)",
133 "&rotlwi (@x[$b1],@x[$b1],7)",
134 "&rotlwi (@x[$b2],@x[$b2],7)",
135 "&rotlwi (@x[$b3],@x[$b3],7)"
136 );
137 }
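# For reference, each column above is the standard ChaCha quarter-round on
# (a,b,c,d):
#
#	a += b; d ^= a; d <<<= 16;
#	c += d; b ^= c; b <<<= 12;
#	a += b; d ^= a; d <<<=  8;
#	c += d; b ^= c; b <<<=  7;
#
# ROUND() emits four such quarter-rounds, interleaved four-wide to hide
# instruction latency.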
138
139 $code.=<<___;
140 .machine "any"
141 .text
142
143 .globl .ChaCha20_ctr32_int
144 .align 5
145 .ChaCha20_ctr32_int:
146 __ChaCha20_ctr32_int:
147 ${UCMP}i $len,0
148 beqlr-
149
150 $STU $sp,-$FRAME($sp)
151 mflr r0
152
153 $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
154 $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
155 $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
156 $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
157 $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
158 $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
159 $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
160 $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
161 $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
162 $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
163 $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
164 $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
165 $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
166 $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
167 $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
168 $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
169 $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
170 $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
171 $PUSH r0,`$FRAME+$LRSAVE`($sp)
172
173 lwz @d[0],0($ctr) # load counter
174 lwz @d[1],4($ctr)
175 lwz @d[2],8($ctr)
176 lwz @d[3],12($ctr)
177
178 bl __ChaCha20_1x
179
180 $POP r0,`$FRAME+$LRSAVE`($sp)
181 $POP r14,`$FRAME-$SIZE_T*18`($sp)
182 $POP r15,`$FRAME-$SIZE_T*17`($sp)
183 $POP r16,`$FRAME-$SIZE_T*16`($sp)
184 $POP r17,`$FRAME-$SIZE_T*15`($sp)
185 $POP r18,`$FRAME-$SIZE_T*14`($sp)
186 $POP r19,`$FRAME-$SIZE_T*13`($sp)
187 $POP r20,`$FRAME-$SIZE_T*12`($sp)
188 $POP r21,`$FRAME-$SIZE_T*11`($sp)
189 $POP r22,`$FRAME-$SIZE_T*10`($sp)
190 $POP r23,`$FRAME-$SIZE_T*9`($sp)
191 $POP r24,`$FRAME-$SIZE_T*8`($sp)
192 $POP r25,`$FRAME-$SIZE_T*7`($sp)
193 $POP r26,`$FRAME-$SIZE_T*6`($sp)
194 $POP r27,`$FRAME-$SIZE_T*5`($sp)
195 $POP r28,`$FRAME-$SIZE_T*4`($sp)
196 $POP r29,`$FRAME-$SIZE_T*3`($sp)
197 $POP r30,`$FRAME-$SIZE_T*2`($sp)
198 $POP r31,`$FRAME-$SIZE_T*1`($sp)
199 mtlr r0
200 addi $sp,$sp,$FRAME
201 blr
202 .long 0
203 .byte 0,12,4,1,0x80,18,5,0
204 .long 0
205 .size .ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int
206
207 .align 5
208 __ChaCha20_1x:
209 Loop_outer:
210 lis @x[0],0x6170 # synthesize sigma
211 lis @x[1],0x3320
212 lis @x[2],0x7962
213 lis @x[3],0x6b20
214 ori @x[0],@x[0],0x7865
215 ori @x[1],@x[1],0x646e
216 ori @x[2],@x[2],0x2d32
217 ori @x[3],@x[3],0x6574
218
219 li r0,10 # inner loop counter
220 lwz @x[4],0($key) # load key
221 lwz @x[5],4($key)
222 lwz @x[6],8($key)
223 lwz @x[7],12($key)
224 lwz @x[8],16($key)
225 mr @x[12],@d[0] # copy counter
226 lwz @x[9],20($key)
227 mr @x[13],@d[1]
228 lwz @x[10],24($key)
229 mr @x[14],@d[2]
230 lwz @x[11],28($key)
231 mr @x[15],@d[3]
232
233 mr @t[0],@x[4]
234 mr @t[1],@x[5]
235 mr @t[2],@x[6]
236 mr @t[3],@x[7]
237
238 mtctr r0
239 Loop:
240 ___
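# One iteration of Loop is a ChaCha "double round": ROUND(0,4,8,12) covers the
# four columns of the 4x4 state and ROUND(0,5,10,15) the four diagonals.  With
# the counter set to 10 (li r0,10 / mtctr r0 above) this gives the 20 rounds
# of ChaCha20.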
241 foreach (&ROUND(0, 4, 8,12)) { eval; }
242 foreach (&ROUND(0, 5,10,15)) { eval; }
243 $code.=<<___;
244 bdnz Loop
245
246 subic $len,$len,64 # $len-=64
247 addi @x[0],@x[0],0x7865 # accumulate key block
248 addi @x[1],@x[1],0x646e
249 addi @x[2],@x[2],0x2d32
250 addi @x[3],@x[3],0x6574
251 addis @x[0],@x[0],0x6170
252 addis @x[1],@x[1],0x3320
253 addis @x[2],@x[2],0x7962
254 addis @x[3],@x[3],0x6b20
255
256 subfe. r0,r0,r0 # borrow?-1:0
257 add @x[4],@x[4],@t[0]
258 lwz @t[0],16($key)
259 add @x[5],@x[5],@t[1]
260 lwz @t[1],20($key)
261 add @x[6],@x[6],@t[2]
262 lwz @t[2],24($key)
263 add @x[7],@x[7],@t[3]
264 lwz @t[3],28($key)
265 add @x[8],@x[8],@t[0]
266 add @x[9],@x[9],@t[1]
267 add @x[10],@x[10],@t[2]
268 add @x[11],@x[11],@t[3]
269
270 add @x[12],@x[12],@d[0]
271 add @x[13],@x[13],@d[1]
272 add @x[14],@x[14],@d[2]
273 add @x[15],@x[15],@d[3]
274 addi @d[0],@d[0],1 # increment counter
275 ___
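# The subic/subfe. pair above is a borrow check: subic sets CA when no borrow
# occurs (i.e. $len was at least 64), and subfe. r0,r0,r0 then yields CA-1, so
# r0 is 0 when a full block remained and -1 otherwise, setting CR0 for the
# "bne Ltail" below.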
276 if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) { # flip byte order
277 $code.=<<___;
278 mr @t[$i&3],@x[$i]
279 rotlwi @x[$i],@x[$i],8
280 rlwimi @x[$i],@t[$i&3],24,0,7
281 rlwimi @x[$i],@t[$i&3],24,16,23
282 ___
283 } }
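# The three-instruction sequence above is the usual PowerPC 32-bit byte swap:
# rotlwi by 8 turns bytes ABCD into BCDA, and the two rlwimi inserts copy
# bytes D and B from the saved original into place, giving DCBA.  On
# big-endian targets this makes the key-stream words little-endian, as ChaCha
# requires, before they are XORed with the input.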
284 $code.=<<___;
285 bne Ltail # $len-=64 borrowed
286
287 lwz @t[0],0($inp) # load input, aligned or not
288 lwz @t[1],4($inp)
289 ${UCMP}i $len,0 # done already?
290 lwz @t[2],8($inp)
291 lwz @t[3],12($inp)
292 xor @x[0],@x[0],@t[0] # xor with input
293 lwz @t[0],16($inp)
294 xor @x[1],@x[1],@t[1]
295 lwz @t[1],20($inp)
296 xor @x[2],@x[2],@t[2]
297 lwz @t[2],24($inp)
298 xor @x[3],@x[3],@t[3]
299 lwz @t[3],28($inp)
300 xor @x[4],@x[4],@t[0]
301 lwz @t[0],32($inp)
302 xor @x[5],@x[5],@t[1]
303 lwz @t[1],36($inp)
304 xor @x[6],@x[6],@t[2]
305 lwz @t[2],40($inp)
306 xor @x[7],@x[7],@t[3]
307 lwz @t[3],44($inp)
308 xor @x[8],@x[8],@t[0]
309 lwz @t[0],48($inp)
310 xor @x[9],@x[9],@t[1]
311 lwz @t[1],52($inp)
312 xor @x[10],@x[10],@t[2]
313 lwz @t[2],56($inp)
314 xor @x[11],@x[11],@t[3]
315 lwz @t[3],60($inp)
316 xor @x[12],@x[12],@t[0]
317 stw @x[0],0($out) # store output, aligned or not
318 xor @x[13],@x[13],@t[1]
319 stw @x[1],4($out)
320 xor @x[14],@x[14],@t[2]
321 stw @x[2],8($out)
322 xor @x[15],@x[15],@t[3]
323 stw @x[3],12($out)
324 stw @x[4],16($out)
325 stw @x[5],20($out)
326 stw @x[6],24($out)
327 stw @x[7],28($out)
328 stw @x[8],32($out)
329 stw @x[9],36($out)
330 stw @x[10],40($out)
331 stw @x[11],44($out)
332 stw @x[12],48($out)
333 stw @x[13],52($out)
334 stw @x[14],56($out)
335 addi $inp,$inp,64
336 stw @x[15],60($out)
337 addi $out,$out,64
338
339 bne Loop_outer
340
341 blr
342
343 .align 4
344 Ltail:
345 addi $len,$len,64 # restore tail length
346 subi $inp,$inp,1 # prepare for *++ptr
347 subi $out,$out,1
348 addi @t[0],$sp,$LOCALS-1
349 mtctr $len
350
351 stw @x[0],`$LOCALS+0`($sp) # save whole block to stack
352 stw @x[1],`$LOCALS+4`($sp)
353 stw @x[2],`$LOCALS+8`($sp)
354 stw @x[3],`$LOCALS+12`($sp)
355 stw @x[4],`$LOCALS+16`($sp)
356 stw @x[5],`$LOCALS+20`($sp)
357 stw @x[6],`$LOCALS+24`($sp)
358 stw @x[7],`$LOCALS+28`($sp)
359 stw @x[8],`$LOCALS+32`($sp)
360 stw @x[9],`$LOCALS+36`($sp)
361 stw @x[10],`$LOCALS+40`($sp)
362 stw @x[11],`$LOCALS+44`($sp)
363 stw @x[12],`$LOCALS+48`($sp)
364 stw @x[13],`$LOCALS+52`($sp)
365 stw @x[14],`$LOCALS+56`($sp)
366 stw @x[15],`$LOCALS+60`($sp)
367
368 Loop_tail: # byte-by-byte loop
369 lbzu @d[0],1($inp)
370 lbzu @x[0],1(@t[0])
371 xor @d[1],@d[0],@x[0]
372 stbu @d[1],1($out)
373 bdnz Loop_tail
374
375 stw $sp,`$LOCALS+0`($sp) # wipe block on stack
376 stw $sp,`$LOCALS+4`($sp)
377 stw $sp,`$LOCALS+8`($sp)
378 stw $sp,`$LOCALS+12`($sp)
379 stw $sp,`$LOCALS+16`($sp)
380 stw $sp,`$LOCALS+20`($sp)
381 stw $sp,`$LOCALS+24`($sp)
382 stw $sp,`$LOCALS+28`($sp)
383 stw $sp,`$LOCALS+32`($sp)
384 stw $sp,`$LOCALS+36`($sp)
385 stw $sp,`$LOCALS+40`($sp)
386 stw $sp,`$LOCALS+44`($sp)
387 stw $sp,`$LOCALS+48`($sp)
388 stw $sp,`$LOCALS+52`($sp)
389 stw $sp,`$LOCALS+56`($sp)
390 stw $sp,`$LOCALS+60`($sp)
391
392 blr
393 .long 0
394 .byte 0,12,0x14,0,0,0,0,0
395 ___
396
397 {{{
398 my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2)
399 = map("v$_",(0..11));
400 my @K = map("v$_",(12..17));
401 my ($FOUR,$sixteen,$twenty4) = map("v$_",(18..20));
402 my ($inpperm,$outperm,$outmask) = map("v$_",(21..23));
403 my @D = map("v$_",(24..28));
404 my ($twelve,$seven,$T0,$T1) = @D;
405
406 my $FRAME=$LOCALS+64+10*16+18*$SIZE_T; # 10*16 is for v20-v28 offload
407
408 sub VMXROUND {
409 my $odd = pop;
410 my ($a,$b,$c,$d)=@_;
411
412 (
413 "&vadduwm ('$a','$a','$b')",
414 "&vxor ('$d','$d','$a')",
415 "&vperm ('$d','$d','$d','$sixteen')",
416
417 "&vadduwm ('$c','$c','$d')",
418 "&vxor ('$b','$b','$c')",
419 "&vrlw ('$b','$b','$twelve')",
420
421 "&vadduwm ('$a','$a','$b')",
422 "&vxor ('$d','$d','$a')",
423 "&vperm ('$d','$d','$d','$twenty4')",
424
425 "&vadduwm ('$c','$c','$d')",
426 "&vxor ('$b','$b','$c')",
427 "&vrlw ('$b','$b','$seven')",
428
429 "&vsldoi ('$c','$c','$c',8)",
430 "&vsldoi ('$b','$b','$b',$odd?4:12)",
431 "&vsldoi ('$d','$d','$d',$odd?12:4)"
432 );
433 }
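# VMXROUND() is the AltiVec counterpart of ROUND(): one 128-bit register holds
# a whole row of the 4x4 state, so each instruction performs a quarter-round
# step on all four columns at once.  Rotations by 16 and 24 are done with
# vperm byte shuffles ($sixteen and $twenty4 are permutation masks loaded from
# the constant table), while 12 and 7 use vrlw with counts splatted by
# vspltisw.  The trailing vsldoi rotate the $b, $c and $d rows so that the
# same code alternately processes columns and diagonals, with $odd selecting
# the direction of the shift.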
434
435 $code.=<<___;
436
437 .globl .ChaCha20_ctr32_vmx
438 .align 5
439 .ChaCha20_ctr32_vmx:
440 ${UCMP}i $len,256
441 blt __ChaCha20_ctr32_int
442
443 $STU $sp,-$FRAME($sp)
444 mflr r0
445 li r10,`15+$LOCALS+64`
446 li r11,`31+$LOCALS+64`
447 mfspr r12,256
448 stvx v20,r10,$sp
449 addi r10,r10,32
450 stvx v21,r11,$sp
451 addi r11,r11,32
452 stvx v22,r10,$sp
453 addi r10,r10,32
454 stvx v23,r11,$sp
455 addi r11,r11,32
456 stvx v24,r10,$sp
457 addi r10,r10,32
458 stvx v25,r11,$sp
459 addi r11,r11,32
460 stvx v26,r10,$sp
461 addi r10,r10,32
462 stvx v27,r11,$sp
463 stvx v28,r10,$sp
464 stw r12,`$FRAME-$SIZE_T*18-4`($sp) # save vrsave
465 $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
466 $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
467 $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
468 $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
469 $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
470 $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
471 $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
472 $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
473 $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
474 $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
475 $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
476 $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
477 $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
478 $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
479 $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
480 $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
481 $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
482 $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
483 li r12,-8
484 $PUSH r0, `$FRAME+$LRSAVE`($sp)
485 mtspr 256,r12 # preserve 29 AltiVec registers
486
487 bl Lconsts # returns pointer to Lsigma in r12
488 li @x[0],16
489 li @x[1],32
490 li @x[2],48
491 li @x[3],64
492 li @x[4],31 # 31 is not a typo
493 li @x[5],15 # nor is 15
494
495 lvx @K[1],0,$key # load key
496 ?lvsr $T0,0,$key # prepare unaligned load
497 lvx @K[2],@x[0],$key
498 lvx @D[0],@x[4],$key
499
500 lvx @K[3],0,$ctr # load counter
501 ?lvsr $T1,0,$ctr # prepare unaligned load
502 lvx @D[1],@x[5],$ctr
503
504 lvx @K[0],0,r12 # load constants
505 lvx @K[5],@x[0],r12 # one
506 lvx $FOUR,@x[1],r12
507 lvx $sixteen,@x[2],r12
508 lvx $twenty4,@x[3],r12
509
510 ?vperm @K[1],@K[2],@K[1],$T0 # align key
511 ?vperm @K[2],@D[0],@K[2],$T0
512 ?vperm @K[3],@D[1],@K[3],$T1 # align counter
513
514 lwz @d[0],0($ctr) # load counter to GPR
515 lwz @d[1],4($ctr)
516 vadduwm @K[3],@K[3],@K[5] # adjust AltiVec counter
517 lwz @d[2],8($ctr)
518 vadduwm @K[4],@K[3],@K[5]
519 lwz @d[3],12($ctr)
520 vadduwm @K[5],@K[4],@K[5]
521
522 vxor $T0,$T0,$T0 # 0x00..00
523 vspltisw $outmask,-1 # 0xff..ff
524 ?lvsr $inpperm,0,$inp # prepare for unaligned load
525 ?lvsl $outperm,0,$out # prepare for unaligned store
526 ?vperm $outmask,$outmask,$T0,$outperm
527
528 be?lvsl $T0,0,@x[0] # 0x00..0f
529 be?vspltisb $T1,3 # 0x03..03
530 be?vxor $T0,$T0,$T1 # swap bytes within words
531 be?vxor $outperm,$outperm,$T1
532 be?vperm $inpperm,$inpperm,$inpperm,$T0
533
534 li r0,10 # inner loop counter
535 b Loop_outer_vmx
536
537 .align 4
538 Loop_outer_vmx:
539 lis @x[0],0x6170 # synthesize sigma
540 lis @x[1],0x3320
541 vmr $A0,@K[0]
542 lis @x[2],0x7962
543 lis @x[3],0x6b20
544 vmr $A1,@K[0]
545 ori @x[0],@x[0],0x7865
546 ori @x[1],@x[1],0x646e
547 vmr $A2,@K[0]
548 ori @x[2],@x[2],0x2d32
549 ori @x[3],@x[3],0x6574
550 vmr $B0,@K[1]
551
552 lwz @x[4],0($key) # load key to GPR
553 vmr $B1,@K[1]
554 lwz @x[5],4($key)
555 vmr $B2,@K[1]
556 lwz @x[6],8($key)
557 vmr $C0,@K[2]
558 lwz @x[7],12($key)
559 vmr $C1,@K[2]
560 lwz @x[8],16($key)
561 vmr $C2,@K[2]
562 mr @x[12],@d[0] # copy GPR counter
563 lwz @x[9],20($key)
564 vmr $D0,@K[3]
565 mr @x[13],@d[1]
566 lwz @x[10],24($key)
567 vmr $D1,@K[4]
568 mr @x[14],@d[2]
569 lwz @x[11],28($key)
570 vmr $D2,@K[5]
571 mr @x[15],@d[3]
572
573 mr @t[0],@x[4]
574 mr @t[1],@x[5]
575 mr @t[2],@x[6]
576 mr @t[3],@x[7]
577
578 vspltisw $twelve,12 # synthesize constants
579 vspltisw $seven,7
580
581 mtctr r0
582 nop
583 Loop_vmx:
584 ___
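# Each trip through Loop_vmx advances four independent ChaCha states: three in
# AltiVec registers ($A0..$D0, $A1..$D1, $A2..$D2) and one in GPRs via
# ROUND().  Their instruction streams are interleaved below so the vector and
# integer units can run in parallel; one Loop_outer_vmx iteration therefore
# produces 4 x 64 = 256 bytes of key stream.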
585 my @thread0=&VMXROUND($A0,$B0,$C0,$D0,0);
586 my @thread1=&VMXROUND($A1,$B1,$C1,$D1,0);
587 my @thread2=&VMXROUND($A2,$B2,$C2,$D2,0);
588 my @thread3=&ROUND(0,4,8,12);
589
590 foreach (@thread0) {
591 eval; eval(shift(@thread3));
592 eval(shift(@thread1)); eval(shift(@thread3));
593 eval(shift(@thread2)); eval(shift(@thread3));
594 }
595 foreach (@thread3) { eval; }
596
597 @thread0=&VMXROUND($A0,$B0,$C0,$D0,1);
598 @thread1=&VMXROUND($A1,$B1,$C1,$D1,1);
599 @thread2=&VMXROUND($A2,$B2,$C2,$D2,1);
600 @thread3=&ROUND(0,5,10,15);
601
602 foreach (@thread0) {
603 eval; eval(shift(@thread3));
604 eval(shift(@thread1)); eval(shift(@thread3));
605 eval(shift(@thread2)); eval(shift(@thread3));
606 }
607 foreach (@thread3) { eval; }
608 $code.=<<___;
609 bdnz Loop_vmx
610
611 subi $len,$len,256 # $len-=256
612 addi @x[0],@x[0],0x7865 # accumulate key block
613 addi @x[1],@x[1],0x646e
614 addi @x[2],@x[2],0x2d32
615 addi @x[3],@x[3],0x6574
616 addis @x[0],@x[0],0x6170
617 addis @x[1],@x[1],0x3320
618 addis @x[2],@x[2],0x7962
619 addis @x[3],@x[3],0x6b20
620 add @x[4],@x[4],@t[0]
621 lwz @t[0],16($key)
622 add @x[5],@x[5],@t[1]
623 lwz @t[1],20($key)
624 add @x[6],@x[6],@t[2]
625 lwz @t[2],24($key)
626 add @x[7],@x[7],@t[3]
627 lwz @t[3],28($key)
628 add @x[8],@x[8],@t[0]
629 add @x[9],@x[9],@t[1]
630 add @x[10],@x[10],@t[2]
631 add @x[11],@x[11],@t[3]
632 add @x[12],@x[12],@d[0]
633 add @x[13],@x[13],@d[1]
634 add @x[14],@x[14],@d[2]
635 add @x[15],@x[15],@d[3]
636
637 vadduwm $A0,$A0,@K[0] # accumulate key block
638 vadduwm $A1,$A1,@K[0]
639 vadduwm $A2,$A2,@K[0]
640 vadduwm $B0,$B0,@K[1]
641 vadduwm $B1,$B1,@K[1]
642 vadduwm $B2,$B2,@K[1]
643 vadduwm $C0,$C0,@K[2]
644 vadduwm $C1,$C1,@K[2]
645 vadduwm $C2,$C2,@K[2]
646 vadduwm $D0,$D0,@K[3]
647 vadduwm $D1,$D1,@K[4]
648 vadduwm $D2,$D2,@K[5]
649
650 addi @d[0],@d[0],4 # increment counter
651 vadduwm @K[3],@K[3],$FOUR
652 vadduwm @K[4],@K[4],$FOUR
653 vadduwm @K[5],@K[5],$FOUR
654
655 ___
656 if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) { # flip byte order
657 $code.=<<___;
658 mr @t[$i&3],@x[$i]
659 rotlwi @x[$i],@x[$i],8
660 rlwimi @x[$i],@t[$i&3],24,0,7
661 rlwimi @x[$i],@t[$i&3],24,16,23
662 ___
663 } }
664 $code.=<<___;
665 lwz @t[0],0($inp) # load input, aligned or not
666 lwz @t[1],4($inp)
667 lwz @t[2],8($inp)
668 lwz @t[3],12($inp)
669 xor @x[0],@x[0],@t[0] # xor with input
670 lwz @t[0],16($inp)
671 xor @x[1],@x[1],@t[1]
672 lwz @t[1],20($inp)
673 xor @x[2],@x[2],@t[2]
674 lwz @t[2],24($inp)
675 xor @x[3],@x[3],@t[3]
676 lwz @t[3],28($inp)
677 xor @x[4],@x[4],@t[0]
678 lwz @t[0],32($inp)
679 xor @x[5],@x[5],@t[1]
680 lwz @t[1],36($inp)
681 xor @x[6],@x[6],@t[2]
682 lwz @t[2],40($inp)
683 xor @x[7],@x[7],@t[3]
684 lwz @t[3],44($inp)
685 xor @x[8],@x[8],@t[0]
686 lwz @t[0],48($inp)
687 xor @x[9],@x[9],@t[1]
688 lwz @t[1],52($inp)
689 xor @x[10],@x[10],@t[2]
690 lwz @t[2],56($inp)
691 xor @x[11],@x[11],@t[3]
692 lwz @t[3],60($inp)
693 xor @x[12],@x[12],@t[0]
694 stw @x[0],0($out) # store output, aligned or not
695 xor @x[13],@x[13],@t[1]
696 stw @x[1],4($out)
697 xor @x[14],@x[14],@t[2]
698 stw @x[2],8($out)
699 xor @x[15],@x[15],@t[3]
700 stw @x[3],12($out)
701 addi $inp,$inp,64
702 stw @x[4],16($out)
703 li @t[0],16
704 stw @x[5],20($out)
705 li @t[1],32
706 stw @x[6],24($out)
707 li @t[2],48
708 stw @x[7],28($out)
709 li @t[3],64
710 stw @x[8],32($out)
711 stw @x[9],36($out)
712 stw @x[10],40($out)
713 stw @x[11],44($out)
714 stw @x[12],48($out)
715 stw @x[13],52($out)
716 stw @x[14],56($out)
717 stw @x[15],60($out)
718 addi $out,$out,64
719
720 lvx @D[0],0,$inp # load input
721 lvx @D[1],@t[0],$inp
722 lvx @D[2],@t[1],$inp
723 lvx @D[3],@t[2],$inp
724 lvx @D[4],@t[3],$inp
725 addi $inp,$inp,64
726
727 ?vperm @D[0],@D[1],@D[0],$inpperm # align input
728 ?vperm @D[1],@D[2],@D[1],$inpperm
729 ?vperm @D[2],@D[3],@D[2],$inpperm
730 ?vperm @D[3],@D[4],@D[3],$inpperm
731 vxor $A0,$A0,@D[0] # xor with input
732 vxor $B0,$B0,@D[1]
733 lvx @D[1],@t[0],$inp # keep loading input
734 vxor $C0,$C0,@D[2]
735 lvx @D[2],@t[1],$inp
736 vxor $D0,$D0,@D[3]
737 lvx @D[3],@t[2],$inp
738 lvx @D[0],@t[3],$inp
739 addi $inp,$inp,64
740 li @t[3],63 # 63 is not a typo
741 vperm $A0,$A0,$A0,$outperm # pre-misalign output
742 vperm $B0,$B0,$B0,$outperm
743 vperm $C0,$C0,$C0,$outperm
744 vperm $D0,$D0,$D0,$outperm
745
746 ?vperm @D[4],@D[1],@D[4],$inpperm # align input
747 ?vperm @D[1],@D[2],@D[1],$inpperm
748 ?vperm @D[2],@D[3],@D[2],$inpperm
749 ?vperm @D[3],@D[0],@D[3],$inpperm
750 vxor $A1,$A1,@D[4]
751 vxor $B1,$B1,@D[1]
752 lvx @D[1],@t[0],$inp # keep loading input
753 vxor $C1,$C1,@D[2]
754 lvx @D[2],@t[1],$inp
755 vxor $D1,$D1,@D[3]
756 lvx @D[3],@t[2],$inp
757 lvx @D[4],@t[3],$inp # redundant in aligned case
758 addi $inp,$inp,64
759 vperm $A1,$A1,$A1,$outperm # pre-misalign output
760 vperm $B1,$B1,$B1,$outperm
761 vperm $C1,$C1,$C1,$outperm
762 vperm $D1,$D1,$D1,$outperm
763
764 ?vperm @D[0],@D[1],@D[0],$inpperm # align input
765 ?vperm @D[1],@D[2],@D[1],$inpperm
766 ?vperm @D[2],@D[3],@D[2],$inpperm
767 ?vperm @D[3],@D[4],@D[3],$inpperm
768 vxor $A2,$A2,@D[0]
769 vxor $B2,$B2,@D[1]
770 vxor $C2,$C2,@D[2]
771 vxor $D2,$D2,@D[3]
772 vperm $A2,$A2,$A2,$outperm # pre-misalign output
773 vperm $B2,$B2,$B2,$outperm
774 vperm $C2,$C2,$C2,$outperm
775 vperm $D2,$D2,$D2,$outperm
776
777 andi. @x[1],$out,15 # is $out aligned?
778 mr @x[0],$out
779
780 vsel @D[0],$A0,$B0,$outmask # collect pre-misaligned output
781 vsel @D[1],$B0,$C0,$outmask
782 vsel @D[2],$C0,$D0,$outmask
783 vsel @D[3],$D0,$A1,$outmask
784 vsel $B0,$A1,$B1,$outmask
785 vsel $C0,$B1,$C1,$outmask
786 vsel $D0,$C1,$D1,$outmask
787 vsel $A1,$D1,$A2,$outmask
788 vsel $B1,$A2,$B2,$outmask
789 vsel $C1,$B2,$C2,$outmask
790 vsel $D1,$C2,$D2,$outmask
791
792 #stvx $A0,0,$out # take it easy on the edges
793 stvx @D[0],@t[0],$out # store output
794 stvx @D[1],@t[1],$out
795 stvx @D[2],@t[2],$out
796 addi $out,$out,64
797 stvx @D[3],0,$out
798 stvx $B0,@t[0],$out
799 stvx $C0,@t[1],$out
800 stvx $D0,@t[2],$out
801 addi $out,$out,64
802 stvx $A1,0,$out
803 stvx $B1,@t[0],$out
804 stvx $C1,@t[1],$out
805 stvx $D1,@t[2],$out
806 addi $out,$out,64
807
808 beq Laligned_vmx
809
810 sub @x[2],$out,@x[1] # in misaligned case edges
811 li @x[3],0 # are written byte-by-byte
812 Lunaligned_tail_vmx:
813 stvebx $D2,@x[3],@x[2]
814 addi @x[3],@x[3],1
815 cmpw @x[3],@x[1]
816 bne Lunaligned_tail_vmx
817
818 sub @x[2],@x[0],@x[1]
819 Lunaligned_head_vmx:
820 stvebx $A0,@x[1],@x[2]
821 cmpwi @x[1],15
822 addi @x[1],@x[1],1
823 bne Lunaligned_head_vmx
824
825 ${UCMP}i $len,255 # done with 256-byte blocks yet?
826 bgt Loop_outer_vmx
827
828 b Ldone_vmx
829
830 .align 4
831 Laligned_vmx:
832 stvx $A0,0,@x[0] # head hexaword was not stored
833
834 ${UCMP}i $len,255 # done with 256-byte blocks yet?
835 bgt Loop_outer_vmx
836 nop
837
838 Ldone_vmx:
839 ${UCMP}i $len,0 # done yet?
840 bnel __ChaCha20_1x
841
842 lwz r12,`$FRAME-$SIZE_T*18-4`($sp) # pull vrsave
843 li r10,`15+$LOCALS+64`
844 li r11,`31+$LOCALS+64`
845 mtspr 256,r12 # restore vrsave
846 lvx v20,r10,$sp
847 addi r10,r10,32
848 lvx v21,r11,$sp
849 addi r11,r11,32
850 lvx v22,r10,$sp
851 addi r10,r10,32
852 lvx v23,r11,$sp
853 addi r11,r11,32
854 lvx v24,r10,$sp
855 addi r10,r10,32
856 lvx v25,r11,$sp
857 addi r11,r11,32
858 lvx v26,r10,$sp
859 addi r10,r10,32
860 lvx v27,r11,$sp
861 lvx v28,r10,$sp
862 $POP r0, `$FRAME+$LRSAVE`($sp)
863 $POP r14,`$FRAME-$SIZE_T*18`($sp)
864 $POP r15,`$FRAME-$SIZE_T*17`($sp)
865 $POP r16,`$FRAME-$SIZE_T*16`($sp)
866 $POP r17,`$FRAME-$SIZE_T*15`($sp)
867 $POP r18,`$FRAME-$SIZE_T*14`($sp)
868 $POP r19,`$FRAME-$SIZE_T*13`($sp)
869 $POP r20,`$FRAME-$SIZE_T*12`($sp)
870 $POP r21,`$FRAME-$SIZE_T*11`($sp)
871 $POP r22,`$FRAME-$SIZE_T*10`($sp)
872 $POP r23,`$FRAME-$SIZE_T*9`($sp)
873 $POP r24,`$FRAME-$SIZE_T*8`($sp)
874 $POP r25,`$FRAME-$SIZE_T*7`($sp)
875 $POP r26,`$FRAME-$SIZE_T*6`($sp)
876 $POP r27,`$FRAME-$SIZE_T*5`($sp)
877 $POP r28,`$FRAME-$SIZE_T*4`($sp)
878 $POP r29,`$FRAME-$SIZE_T*3`($sp)
879 $POP r30,`$FRAME-$SIZE_T*2`($sp)
880 $POP r31,`$FRAME-$SIZE_T*1`($sp)
881 mtlr r0
882 addi $sp,$sp,$FRAME
883 blr
884 .long 0
885 .byte 0,12,0x04,1,0x80,18,5,0
886 .long 0
887 .size .ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
888
889 .align 5
890 Lconsts:
891 mflr r0
892 bcl 20,31,\$+4
893 mflr r12 #vvvvv "distance" between . and Lsigma
894 addi r12,r12,`64-8`
895 mtlr r0
896 blr
897 .long 0
898 .byte 0,12,0x14,0,0,0,0,0
899 .space `64-9*4`
900 Lsigma:
901 .long 0x61707865,0x3320646e,0x79622d32,0x6b206574
902 .long 1,0,0,0
903 .long 4,0,0,0
904 ___
905 $code.=<<___ if ($LITTLE_ENDIAN);
906 .long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
907 .long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
908 ___
909 $code.=<<___ if (!$LITTLE_ENDIAN); # flipped words
910 .long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
911 .long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
912 ___
913 $code.=<<___;
914 .asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
915 .align 2
916 ___
917 }}}
918
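# The loop below expands `...` arithmetic and fixes up endian-specific
# instructions: for example "?lvsr" is emitted as lvsr on little-endian
# targets but becomes lvsl on big-endian ones, a "?vperm" has its two source
# registers swapped, and a "?vsldoi" gets its sources swapped and its shift
# count N replaced by 16-N; "be?"/"le?" prefixed lines are kept only for the
# matching endianness.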
919 foreach (split("\n",$code)) {
920 s/\`([^\`]*)\`/eval $1/ge;
921
922 # instructions prefixed with '?' are endian-specific and need
923 # to be adjusted accordingly...
924 if ($flavour !~ /le$/) { # big-endian
925 s/be\?// or
926 s/le\?/#le#/ or
927 s/\?lvsr/lvsl/ or
928 s/\?lvsl/lvsr/ or
929 s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
930 s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
931 } else { # little-endian
932 s/le\?// or
933 s/be\?/#be#/ or
934 s/\?([a-z]+)/$1/;
935 }
936
937 print $_,"\n";
938 }
939
940 close STDOUT or die "error closing STDOUT: $!";