2 # Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the Apache License 2.0 (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # Initial support for Fujitsu SPARC64 X/X+ comprises minimally
20 # required key setup and single-block procedures.
24 # Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
25 # that parallelizable nature of CBC decrypt and CTR is not utilized
26 # yet. CBC encrypt on the other hand is as good as it can possibly
27 # get processing one byte in 4.1 cycles with 128-bit key on SPARC64 X.
28 # This is ~6x faster than pure software implementation...
32 # Switch from faligndata to fshiftorx, which allows omitting alignaddr
33 # instructions and improve single-block and short-input performance
34 # with misaligned data.
# The last command-line argument, if present, names the output file;
# redirect STDOUT there so the generated assembly can be captured by
# the build.  Use three-arg open and check for failure instead of the
# unchecked two-arg form, which silently ignores open errors and is
# susceptible to mode injection via the filename.
$output = pop;
if ($output) {
    open STDOUT, '>', $output
        or die "can't open $output for writing: $!";
}
39 my ($inp,$out,$key,$rounds,$tmp,$mask) = map("%o$_",(0..5));
42 #include "sparc_arch.h"
44 #define LOCALS (STACK_BIAS+STACK_FRAME)
51 and $inp, 7, $tmp ! is input aligned?
53 ldd
[$key + 0], %f6 ! round
[0]
56 ld
[$key + 240], $rounds
59 add
%o7, .Linp_align
-1b
, %o7
62 ldd
[$inp + 0], %f0 ! load input
63 brz
,pt
$tmp, .Lenc_inp_aligned
66 ldd
[%o7 + $tmp], %f14 ! shift left params
68 fshiftorx
%f0, %f2, %f14, %f0
69 fshiftorx
%f2, %f4, %f14, %f2
72 ldd
[$key + 16], %f10 ! round
[1]
75 fxor
%f0, %f6, %f0 ! ^=round
[0]
77 ldd
[$key + 32], %f6 ! round
[2]
80 sub $rounds, 4, $rounds
84 faesencx
%f2, %f10, %f0
85 faesencx
%f4, %f12, %f2
91 faesencx
%f2, %f6, %f0
92 faesencx
%f4, %f8, %f2
96 brnz
,a
$rounds, .Loop_enc
97 sub $rounds, 2, $rounds
99 andcc
$out, 7, $tmp ! is output aligned?
102 srl
$mask, $tmp, $mask
107 faesencx
%f2, %f10, %f0
108 faesencx
%f4, %f12, %f2
109 ldd
[%o7 + $tmp], %f14 ! shift right params
112 faesenclx
%f2, %f6, %f0
113 faesenclx
%f4, %f8, %f2
115 bnz
,pn
%icc, .Lenc_out_unaligned
126 fshiftorx
%f0, %f0, %f14, %f4
127 fshiftorx
%f0, %f2, %f14, %f6
128 fshiftorx
%f2, %f2, %f14, %f8
130 stda
%f4, [$out + $mask]0xc0 ! partial store
132 stda
%f8, [$inp + $tmp]0xc0 ! partial store
135 .type aes_fx_encrypt
,#function
136 .size aes_fx_encrypt
,.-aes_fx_encrypt
138 .globl aes_fx_decrypt
141 and $inp, 7, $tmp ! is input aligned?
143 ldd
[$key + 0], %f6 ! round
[0]
146 ld
[$key + 240], $rounds
149 add
%o7, .Linp_align
-1b
, %o7
152 ldd
[$inp + 0], %f0 ! load input
153 brz
,pt
$tmp, .Ldec_inp_aligned
156 ldd
[%o7 + $tmp], %f14 ! shift left params
158 fshiftorx
%f0, %f2, %f14, %f0
159 fshiftorx
%f2, %f4, %f14, %f2
162 ldd
[$key + 16], %f10 ! round
[1]
163 ldd
[$key + 24], %f12
165 fxor
%f0, %f6, %f0 ! ^=round
[0]
167 ldd
[$key + 32], %f6 ! round
[2]
170 sub $rounds, 4, $rounds
174 faesdecx
%f2, %f10, %f0
175 faesdecx
%f4, %f12, %f2
176 ldd
[$key + 16], %f10
177 ldd
[$key + 24], %f12
181 faesdecx
%f2, %f6, %f0
182 faesdecx
%f4, %f8, %f2
186 brnz
,a
$rounds, .Loop_dec
187 sub $rounds, 2, $rounds
189 andcc
$out, 7, $tmp ! is output aligned?
192 srl
$mask, $tmp, $mask
197 faesdecx
%f2, %f10, %f0
198 faesdecx
%f4, %f12, %f2
199 ldd
[%o7 + $tmp], %f14 ! shift right params
202 faesdeclx
%f2, %f6, %f0
203 faesdeclx
%f4, %f8, %f2
205 bnz
,pn
%icc, .Ldec_out_unaligned
216 fshiftorx
%f0, %f0, %f14, %f4
217 fshiftorx
%f0, %f2, %f14, %f6
218 fshiftorx
%f2, %f2, %f14, %f8
220 stda
%f4, [$out + $mask]0xc0 ! partial store
222 stda
%f8, [$inp + $tmp]0xc0 ! partial store
225 .type aes_fx_decrypt
,#function
226 .size aes_fx_decrypt
,.-aes_fx_decrypt
230 my ($inp,$bits,$out,$tmp,$inc) = map("%o$_",(0..5));
232 .globl aes_fx_set_decrypt_key
234 aes_fx_set_decrypt_key
:
239 .type aes_fx_set_decrypt_key
,#function
240 .size aes_fx_set_decrypt_key
,.-aes_fx_set_decrypt_key
242 .globl aes_fx_set_encrypt_key
244 aes_fx_set_encrypt_key
:
254 add
%o7, .Linp_align
-1b
, %o7
256 ldd
[%o7 + $tmp], %f10 ! shift left params
266 brz
,pt
$tmp, .L256aligned
270 fshiftorx
%f0, %f2, %f10, %f0
271 fshiftorx
%f2, %f4, %f10, %f2
272 fshiftorx
%f4, %f6, %f10, %f4
273 fshiftorx
%f6, %f8, %f10, %f6
277 and $inc, `14*16`, $tmp
278 st
$bits, [$out + 240] ! store rounds
279 add
$out, $tmp, $out ! start
or end of key schedule
280 sllx
$inc, 4, $inc ! 16 or -16
282 for ($i=0; $i<6; $i++) {
285 faeskeyx
%f6, `0x10+$i`, %f0
288 faeskeyx
%f0, 0x00, %f2
290 faeskeyx
%f2, 0x01, %f4
293 faeskeyx
%f4, 0x00, %f6
298 faeskeyx
%f6, `0x10+$i`, %f0
301 faeskeyx
%f0, 0x00, %f2
308 xor %o0, %o0, %o0 ! return 0
312 brz
,pt
$tmp, .L192aligned
316 fshiftorx
%f0, %f2, %f10, %f0
317 fshiftorx
%f2, %f4, %f10, %f2
318 fshiftorx
%f4, %f6, %f10, %f4
322 and $inc, `12*16`, $tmp
323 st
$bits, [$out + 240] ! store rounds
324 add
$out, $tmp, $out ! start
or end of key schedule
325 sllx
$inc, 4, $inc ! 16 or -16
327 for ($i=0; $i<8; $i+=2) {
330 faeskeyx
%f4, `0x10+$i`, %f0
333 faeskeyx
%f0, 0x00, %f2
335 faeskeyx
%f2, 0x00, %f4
338 faeskeyx
%f4, `0x10+$i+1`, %f0
340 faeskeyx
%f0, 0x00, %f2
344 $code.=<<___
if ($i<6);
345 faeskeyx
%f2, 0x00, %f4
352 xor %o0, %o0, %o0 ! return 0
356 brz
,pt
$tmp, .L128aligned
360 fshiftorx
%f0, %f2, %f10, %f0
361 fshiftorx
%f2, %f4, %f10, %f2
365 and $inc, `10*16`, $tmp
366 st
$bits, [$out + 240] ! store rounds
367 add
$out, $tmp, $out ! start
or end of key schedule
368 sllx
$inc, 4, $inc ! 16 or -16
370 for ($i=0; $i<10; $i++) {
373 faeskeyx
%f2, `0x10+$i`, %f0
376 faeskeyx
%f0, 0x00, %f2
383 xor %o0, %o0, %o0 ! return 0
384 .type aes_fx_set_encrypt_key
,#function
385 .size aes_fx_set_encrypt_key
,.-aes_fx_set_encrypt_key
# Register assignments for aes_fx_cbc_encrypt: the six arguments live
# in input registers %i0-%i5, bookkeeping values in local registers
# (%l0 onward; the 0..7 map yields one more value than is bound), and
# working data in even-numbered double-precision FP registers taken
# from %f16-%f62.
389 my ($inp,$out,$len,$key,$ivp,$dir) = map("%i$_",(0..5));
390 my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
391 my ($iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
392 = map("%f$_",grep { !($_ & 1) } (16 .. 62));
# $ileft/$iright alias the alignment registers, reused once the
# input/output alignment shifts have been derived from them.
393 my ($ileft,$iright) = ($ialign,$oalign);
396 .globl aes_fx_cbc_encrypt
399 save
%sp, -STACK_FRAME
-16, %sp
403 brz
,pn
$len, .Lcbc_no_data
404 sll
$ialign, 3, $ileft
407 add
%o7, .Linp_align
-1b
, %o7
409 ld
[$key + 240], $rounds
411 ld
[$ivp + 0], %f0 ! load ivec
414 sll
$oalign, 3, $mask
418 sll
$rounds, 4, $rounds
419 add
$rounds, $key, $end
420 ldd
[$key + 0], $r0hi ! round
[0]
421 ldd
[$key + 8], $r0lo
425 ldd
[$end + 0], $rlhi ! round
[last]
426 ldd
[$end + 8], $rllo
430 ldd
[$key + 16], %f10 ! round
[1]
431 ldd
[$key + 24], %f12
433 ldd
[%o7 + $ileft], $fshift ! shift left params
435 ldd
[$inp - 16], $in0 ! load input
437 ldda
[$inp]0x82, $intail ! non
-faulting load
438 brz
$dir, .Lcbc_decrypt
439 add
$inp, $inc, $inp ! inp
+=16
441 fxor
$r0hi, %f0, %f0 ! ivec
^=round
[0]
443 fshiftorx
$in0, $in1, $fshift, $in0
444 fshiftorx
$in1, $intail, $fshift, $in1
448 fxor
$in0, %f0, %f0 ! inp
^ivec
^round
[0]
450 ldd
[$key + 32], %f6 ! round
[2]
453 sub $rounds, 16*6, $inner
457 faesencx
%f2, %f10, %f0
458 faesencx
%f4, %f12, %f2
459 ldd
[$end + 16], %f10
460 ldd
[$end + 24], %f12
464 faesencx
%f2, %f6, %f0
465 faesencx
%f4, %f8, %f2
469 brnz
,a
$inner, .Lcbc_enc
470 sub $inner, 16*2, $inner
473 faesencx
%f2, %f10, %f0
474 faesencx
%f4, %f12, %f2
475 ldd
[$end + 16], %f10 ! round
[last-1]
476 ldd
[$end + 24], %f12
480 ldd
[$inp - 8], $in1 ! load
next input block
481 ldda
[$inp]0x82, $intail ! non
-faulting load
482 add
$inp, $inc, $inp ! inp
+=16
485 faesencx
%f2, %f6, %f0
486 faesencx
%f4, %f8, %f2
488 fshiftorx
$in0, $in1, $fshift, $in0
489 fshiftorx
$in1, $intail, $fshift, $in1
492 faesencx
%f2, %f10, %f0
493 faesencx
%f4, %f12, %f2
494 ldd
[$key + 16], %f10 ! round
[1]
495 ldd
[$key + 24], %f12
497 fxor
$r0hi, $in0, $in0 ! inp
^=round
[0]
498 fxor
$r0lo, $in1, $in1
501 faesenclx
%f2, $rlhi, %f0
502 faesenclx
%f4, $rllo, %f2
504 brnz
,pn
$oalign, .Lcbc_enc_unaligned_out
511 brnz
,a
$len, .Loop_cbc_enc
514 st
%f0, [$ivp + 0] ! output ivec
524 .Lcbc_enc_unaligned_out
:
525 ldd
[%o7 + $mask], $fshift ! shift right params
527 srl
$mask, $oalign, $mask
528 sub %g0, $ileft, $iright
530 fshiftorx
%f0, %f0, $fshift, %f6
531 fshiftorx
%f0, %f2, $fshift, %f8
533 stda
%f6, [$out + $mask]0xc0 ! partial store
534 orn
%g0, $mask, $mask
537 brz
$len, .Lcbc_enc_unaligned_out_done
539 b
.Loop_cbc_enc_unaligned_out
543 .Loop_cbc_enc_unaligned_out
:
545 fxor
$in0, %f0, %f0 ! inp
^ivec
^round
[0]
547 ldd
[$key + 32], %f6 ! round
[2]
551 faesencx
%f2, %f10, %f0
552 faesencx
%f4, %f12, %f2
553 ldd
[$key + 48], %f10 ! round
[3]
554 ldd
[$key + 56], %f12
558 brz
$ileft, .Lcbc_enc_aligned_inp
562 sllx
%o0, $ileft, %o0
563 srlx
%o1, $iright, %g1
564 sllx
%o1, $ileft, %o1
566 srlx
%o2, $iright, %o2
569 .Lcbc_enc_aligned_inp
:
571 faesencx
%f2, %f6, %f0
572 faesencx
%f4, %f8, %f2
573 ldd
[$key + 64], %f6 ! round
[4]
576 sub $rounds, 16*8, $inner
578 stx
%o0, [%sp + LOCALS
+ 0]
579 stx
%o1, [%sp + LOCALS
+ 8]
580 add
$inp, $inc, $inp ! inp
+=16
585 faesencx
%f2, %f10, %f0
586 faesencx
%f4, %f12, %f2
587 ldd
[$end + 16], %f10
588 ldd
[$end + 24], %f12
592 faesencx
%f2, %f6, %f0
593 faesencx
%f4, %f8, %f2
597 brnz
,a
$inner, .Lcbc_enc_unaligned
598 sub $inner, 16*2, $inner
601 faesencx
%f2, %f10, %f0
602 faesencx
%f4, %f12, %f2
603 ldd
[$end + 16], %f10 ! round
[last-1]
604 ldd
[$end + 24], %f12
607 faesencx
%f2, %f6, %f0
608 faesencx
%f4, %f8, %f2
610 ldd
[%sp + LOCALS
+ 0], $in0
611 ldd
[%sp + LOCALS
+ 8], $in1
614 faesencx
%f2, %f10, %f0
615 faesencx
%f4, %f12, %f2
616 ldd
[$key + 16], %f10 ! round
[1]
617 ldd
[$key + 24], %f12
619 fxor
$r0hi, $in0, $in0 ! inp
^=round
[0]
620 fxor
$r0lo, $in1, $in1
623 faesenclx
%f2, $rlhi, %f0
624 faesenclx
%f4, $rllo, %f2
626 fshiftorx
$outhead, %f0, $fshift, %f6
627 fshiftorx
%f0, %f2, $fshift, %f8
632 brnz
,a
$len, .Loop_cbc_enc_unaligned_out
635 .Lcbc_enc_unaligned_out_done
:
636 fshiftorx
%f2, %f2, $fshift, %f8
637 stda
%f8, [$out + $mask]0xc0 ! partial store
639 st
%f0, [$ivp + 0] ! output ivec
649 fshiftorx
$in0, $in1, $fshift, $in0
650 fshiftorx
$in1, $intail, $fshift, $in1
655 fxor
$in0, $r0hi, %f0 ! inp
^round
[0]
656 fxor
$in1, $r0lo, %f2
657 ldd
[$key + 32], %f6 ! round
[2]
660 sub $rounds, 16*6, $inner
664 faesdecx
%f2, %f10, %f0
665 faesdecx
%f4, %f12, %f2
666 ldd
[$end + 16], %f10
667 ldd
[$end + 24], %f12
671 faesdecx
%f2, %f6, %f0
672 faesdecx
%f4, %f8, %f2
676 brnz
,a
$inner, .Lcbc_dec
677 sub $inner, 16*2, $inner
680 faesdecx
%f2, %f10, %f0
681 faesdecx
%f4, %f12, %f2
682 ldd
[$end + 16], %f10 ! round
[last-1]
683 ldd
[$end + 24], %f12
686 faesdecx
%f2, %f6, %f0
687 faesdecx
%f4, %f8, %f2
688 fxor
$iv0, $rlhi, %f6 ! ivec
^round
[last]
689 fxor
$iv1, $rllo, %f8
695 ldd
[$inp - 8], $in1 ! load
next input block
696 ldda
[$inp]0x82, $intail ! non
-faulting load
697 add
$inp, $inc, $inp ! inp
+=16
700 faesdecx
%f2, %f10, %f0
701 faesdecx
%f4, %f12, %f2
702 ldd
[$key + 16], %f10 ! round
[1]
703 ldd
[$key + 24], %f12
705 fshiftorx
$in0, $in1, $fshift, $in0
706 fshiftorx
$in1, $intail, $fshift, $in1
709 faesdeclx
%f2, %f6, %f0
710 faesdeclx
%f4, %f8, %f2
712 brnz
,pn
$oalign, .Lcbc_dec_unaligned_out
719 brnz
,a
$len, .Loop_cbc_dec
722 st
$iv0, [$ivp + 0] ! output ivec
723 st
$iv0#lo, [$ivp + 4]
725 st
$iv1#lo, [$ivp + 12]
731 .Lcbc_dec_unaligned_out
:
732 ldd
[%o7 + $mask], $fshift ! shift right params
734 srl
$mask, $oalign, $mask
735 sub %g0, $ileft, $iright
737 fshiftorx
%f0, %f0, $fshift, %f6
738 fshiftorx
%f0, %f2, $fshift, %f8
740 stda
%f6, [$out + $mask]0xc0 ! partial store
741 orn
%g0, $mask, $mask
744 brz
$len, .Lcbc_dec_unaligned_out_done
746 b
.Loop_cbc_dec_unaligned_out
750 .Loop_cbc_dec_unaligned_out
:
752 fxor
$in0, $r0hi, %f0 ! inp
^round
[0]
753 fxor
$in1, $r0lo, %f2
754 ldd
[$key + 32], %f6 ! round
[2]
758 faesdecx
%f2, %f10, %f0
759 faesdecx
%f4, %f12, %f2
760 ldd
[$key + 48], %f10 ! round
[3]
761 ldd
[$key + 56], %f12
765 brz
$ileft, .Lcbc_dec_aligned_inp
769 sllx
%o0, $ileft, %o0
770 srlx
%o1, $iright, %g1
771 sllx
%o1, $ileft, %o1
773 srlx
%o2, $iright, %o2
776 .Lcbc_dec_aligned_inp
:
778 faesdecx
%f2, %f6, %f0
779 faesdecx
%f4, %f8, %f2
780 ldd
[$key + 64], %f6 ! round
[4]
783 sub $rounds, 16*8, $inner
785 stx
%o0, [%sp + LOCALS
+ 0]
786 stx
%o1, [%sp + LOCALS
+ 8]
787 add
$inp, $inc, $inp ! inp
+=16
792 faesdecx
%f2, %f10, %f0
793 faesdecx
%f4, %f12, %f2
794 ldd
[$end + 16], %f10
795 ldd
[$end + 24], %f12
799 faesdecx
%f2, %f6, %f0
800 faesdecx
%f4, %f8, %f2
804 brnz
,a
$inner, .Lcbc_dec_unaligned
805 sub $inner, 16*2, $inner
808 faesdecx
%f2, %f10, %f0
809 faesdecx
%f4, %f12, %f2
810 ldd
[$end + 16], %f10 ! round
[last-1]
811 ldd
[$end + 24], %f12
814 faesdecx
%f2, %f6, %f0
815 faesdecx
%f4, %f8, %f2
817 fxor
$iv0, $rlhi, %f6 ! ivec
^round
[last]
818 fxor
$iv1, $rllo, %f8
821 ldd
[%sp + LOCALS
+ 0], $in0
822 ldd
[%sp + LOCALS
+ 8], $in1
825 faesdecx
%f2, %f10, %f0
826 faesdecx
%f4, %f12, %f2
827 ldd
[$key + 16], %f10 ! round
[1]
828 ldd
[$key + 24], %f12
831 faesdeclx
%f2, %f6, %f0
832 faesdeclx
%f4, %f8, %f2
834 fshiftorx
$outhead, %f0, $fshift, %f6
835 fshiftorx
%f0, %f2, $fshift, %f8
840 brnz
,a
$len, .Loop_cbc_dec_unaligned_out
843 .Lcbc_dec_unaligned_out_done
:
844 fshiftorx
%f2, %f2, $fshift, %f8
845 stda
%f8, [$out + $mask]0xc0 ! partial store
847 st
$iv0, [$ivp + 0] ! output ivec
848 st
$iv0#lo, [$ivp + 4]
850 st
$iv1#lo, [$ivp + 12]
854 .type aes_fx_cbc_encrypt
,#function
855 .size aes_fx_cbc_encrypt
,.-aes_fx_cbc_encrypt
# Register assignments for aes_fx_ctr32_encrypt_blocks, mirroring the
# CBC layout above: five arguments in input registers (the unbound
# sixth map value, "%i5", is discarded), bookkeeping in locals, and
# working data in even-numbered double-precision FP registers from
# %f16-%f62.
859 my ($inp,$out,$len,$key,$ivp) = map("%i$_",(0..5));
860 my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
861 my ($ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
862 = map("%f$_",grep { !($_ & 1) } (16 .. 62));
# Alignment registers are reused for the shift amounts, as in CBC.
863 my ($ileft,$iright) = ($ialign, $oalign);
867 .globl aes_fx_ctr32_encrypt_blocks
869 aes_fx_ctr32_encrypt_blocks
:
870 save
%sp, -STACK_FRAME
-16, %sp
874 brz
,pn
$len, .Lctr32_no_data
875 sll
$ialign, 3, $ileft
878 add
%o7, .Linp_align
- .Lpic
, %o7
880 ld
[$key + 240], $rounds
882 ld
[$ivp + 0], $ctr0 ! load counter
884 ld
[$ivp + 4], $ctr0#lo
885 sll
$oalign, 3, $mask
887 ld
[$ivp + 12], $ctr1#lo
888 ldd
[%o7 + 128], $one
890 sll
$rounds, 4, $rounds
891 add
$rounds, $key, $end
892 ldd
[$key + 0], $r0hi ! round
[0]
893 ldd
[$key + 8], $r0lo
897 ldd
[$key + 16], %f10 ! round
[1]
898 ldd
[$key + 24], %f12
902 ldd
[$end + 0], $rlhi ! round
[last]
903 ldd
[$end + 8], $rllo
905 ldd
[%o7 + $ileft], $fshift ! shiftleft params
907 ldd
[$inp - 16], $in0 ! load input
909 ldda
[$inp]0x82, $intail ! non
-faulting load
910 add
$inp, $inc, $inp ! inp
+=16
912 fshiftorx
$in0, $in1, $fshift, $in0
913 fshiftorx
$in1, $intail, $fshift, $in1
916 fxor
$ctr0, $r0hi, %f0 ! counter
^round
[0]
917 fxor
$ctr1, $r0lo, %f2
918 ldd
[$key + 32], %f6 ! round
[2]
921 sub $rounds, 16*6, $inner
925 faesencx
%f2, %f10, %f0
926 faesencx
%f4, %f12, %f2
927 ldd
[$end + 16], %f10
928 ldd
[$end + 24], %f12
932 faesencx
%f2, %f6, %f0
933 faesencx
%f4, %f8, %f2
937 brnz
,a
$inner, .Lctr32_enc
938 sub $inner, 16*2, $inner
941 faesencx
%f2, %f10, %f0
942 faesencx
%f4, %f12, %f2
943 ldd
[$end + 16], %f10 ! round
[last-1]
944 ldd
[$end + 24], %f12
947 faesencx
%f2, %f6, %f0
948 faesencx
%f4, %f8, %f2
949 fxor
$in0, $rlhi, %f6 ! inp
^round
[last]
950 fxor
$in1, $rllo, %f8
954 ldd
[$inp - 8], $in1 ! load
next input block
955 ldda
[$inp]0x82, $intail ! non
-faulting load
956 add
$inp, $inc, $inp ! inp
+=16
959 faesencx
%f2, %f10, %f0
960 faesencx
%f4, %f12, %f2
961 ldd
[$key + 16], %f10 ! round
[1]
962 ldd
[$key + 24], %f12
964 fshiftorx
$in0, $in1, $fshift, $in0
965 fshiftorx
$in1, $intail, $fshift, $in1
966 fpadd32
$ctr1, $one, $ctr1 ! increment counter
969 faesenclx
%f2, %f6, %f0
970 faesenclx
%f4, %f8, %f2
972 brnz
,pn
$oalign, .Lctr32_unaligned_out
979 brnz
,a
$len, .Loop_ctr32
987 .Lctr32_unaligned_out
:
988 ldd
[%o7 + $mask], $fshift ! shift right params
990 srl
$mask, $oalign, $mask
991 sub %g0, $ileft, $iright
993 fshiftorx
%f0, %f0, $fshift, %f6
994 fshiftorx
%f0, %f2, $fshift, %f8
996 stda
%f6, [$out + $mask]0xc0 ! partial store
997 orn
%g0, $mask, $mask
1000 brz
$len, .Lctr32_unaligned_out_done
1002 b
.Loop_ctr32_unaligned_out
1006 .Loop_ctr32_unaligned_out
:
1008 fxor
$ctr0, $r0hi, %f0 ! counter
^round
[0]
1009 fxor
$ctr1, $r0lo, %f2
1010 ldd
[$key + 32], %f6 ! round
[2]
1011 ldd
[$key + 40], %f8
1014 faesencx
%f2, %f10, %f0
1015 faesencx
%f4, %f12, %f2
1016 ldd
[$key + 48], %f10 ! round
[3]
1017 ldd
[$key + 56], %f12
1019 ldx
[$inp - 16], %o0
1021 brz
$ileft, .Lctr32_aligned_inp
1025 sllx
%o0, $ileft, %o0
1026 srlx
%o1, $iright, %g1
1027 sllx
%o1, $ileft, %o1
1029 srlx
%o2, $iright, %o2
1032 .Lctr32_aligned_inp
:
1034 faesencx
%f2, %f6, %f0
1035 faesencx
%f4, %f8, %f2
1036 ldd
[$key + 64], %f6 ! round
[4]
1037 ldd
[$key + 72], %f8
1039 sub $rounds, 16*8, $inner
1041 stx
%o0, [%sp + LOCALS
+ 0]
1042 stx
%o1, [%sp + LOCALS
+ 8]
1043 add
$inp, $inc, $inp ! inp
+=16
1046 .Lctr32_enc_unaligned
:
1048 faesencx
%f2, %f10, %f0
1049 faesencx
%f4, %f12, %f2
1050 ldd
[$end + 16], %f10
1051 ldd
[$end + 24], %f12
1055 faesencx
%f2, %f6, %f0
1056 faesencx
%f4, %f8, %f2
1060 brnz
,a
$inner, .Lctr32_enc_unaligned
1061 sub $inner, 16*2, $inner
1064 faesencx
%f2, %f10, %f0
1065 faesencx
%f4, %f12, %f2
1066 ldd
[$end + 16], %f10 ! round
[last-1]
1067 ldd
[$end + 24], %f12
1068 fpadd32
$ctr1, $one, $ctr1 ! increment counter
1071 faesencx
%f2, %f6, %f0
1072 faesencx
%f4, %f8, %f2
1073 fxor
$in0, $rlhi, %f6 ! inp
^round
[last]
1074 fxor
$in1, $rllo, %f8
1075 ldd
[%sp + LOCALS
+ 0], $in0
1076 ldd
[%sp + LOCALS
+ 8], $in1
1079 faesencx
%f2, %f10, %f0
1080 faesencx
%f4, %f12, %f2
1081 ldd
[$key + 16], %f10 ! round
[1]
1082 ldd
[$key + 24], %f12
1085 faesenclx
%f2, %f6, %f0
1086 faesenclx
%f4, %f8, %f2
1088 fshiftorx
$outhead, %f0, $fshift, %f6
1089 fshiftorx
%f0, %f2, $fshift, %f8
1094 brnz
,a
$len, .Loop_ctr32_unaligned_out
1097 .Lctr32_unaligned_out_done
:
1098 fshiftorx
%f2, %f2, $fshift, %f8
1099 stda
%f8, [$out + $mask]0xc0 ! partial store
1103 .type aes_fx_ctr32_encrypt_blocks
,#function
1104 .size aes_fx_ctr32_encrypt_blocks
,.-aes_fx_ctr32_encrypt_blocks
1107 .Linp_align
: ! fshiftorx parameters
for left
shift toward
%rs1
1108 .byte
0, 0, 64, 0, 0, 64, 0, -64
1109 .byte
0, 0, 56, 8, 0, 56, 8, -56
1110 .byte
0, 0, 48, 16, 0, 48, 16, -48
1111 .byte
0, 0, 40, 24, 0, 40, 24, -40
1112 .byte
0, 0, 32, 32, 0, 32, 32, -32
1113 .byte
0, 0, 24, 40, 0, 24, 40, -24
1114 .byte
0, 0, 16, 48, 0, 16, 48, -16
1115 .byte
0, 0, 8, 56, 0, 8, 56, -8
1116 .Lout_align
: ! fshiftorx parameters
for right
shift toward
%rs2
1117 .byte
0, 0, 0, 64, 0, 0, 64, 0
1118 .byte
0, 0, 8, 56, 0, 8, 56, -8
1119 .byte
0, 0, 16, 48, 0, 16, 48, -16
1120 .byte
0, 0, 24, 40, 0, 24, 40, -24
1121 .byte
0, 0, 32, 32, 0, 32, 32, -32
1122 .byte
0, 0, 40, 24, 0, 40, 24, -40
1123 .byte
0, 0, 48, 16, 0, 48, 16, -48
1124 .byte
0, 0, 56, 8, 0, 56, 8, -56
1127 .asciz
"AES for Fujitsu SPARC64 X, CRYPTOGAMS by <appro\@openssl.org>"
1131 # Purpose of these subroutines is to explicitly encode VIS instructions,
1132 # so that one can compile the module without having to specify VIS
1133 # extensions on compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
1134 # The idea is to preserve the option of producing a "universal" binary and
1135 # letting the programmer detect at run-time whether the CPU is VIS-capable.
1137 my ($mnemonic,$rs1,$rs2,$rd)=@_;
1139 my %visopf = ( "faligndata" => 0x048,
1140 "bshuffle" => 0x04c,
1145 $ref = "$mnemonic\t$rs1,$rs2,$rd";
1147 if ($opf=$visopf{$mnemonic}) {
1148 foreach ($rs1,$rs2,$rd) {
1149 return $ref if (!/%f([0-9]{1,2})/);
1152 return $ref if ($1&1);
1153 # re-encode for upper double register addressing
1158 return sprintf ".word\t0x%08x !%s",
1159 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
1167 my ($mnemonic,$rs1,$rs2,$rd)=@_;
1168 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
1170 my %visopf = ( "alignaddr" => 0x018,
1172 "alignaddrl" => 0x01a );
1174 $ref = "$mnemonic\t$rs1,$rs2,$rd";
1176 if ($opf=$visopf{$mnemonic}) {
1177 foreach ($rs1,$rs2,$rd) {
1178 return $ref if (!/%([goli])([0-9])/);
1182 return sprintf ".word\t0x%08x !%s",
1183 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
1191 my ($mnemonic,$rs1,$rs2,$rd)=@_;
1193 my %aesopf = ( "faesencx" => 0x90,
1195 "faesenclx" => 0x92,
1196 "faesdeclx" => 0x93,
1197 "faeskeyx" => 0x94 );
1199 $ref = "$mnemonic\t$rs1,$rs2,$rd";
1201 if (defined($opf=$aesopf{$mnemonic})) {
1202 $rs2 = ($rs2 =~ /%f([0-6]*[02468])/) ?
(($1|$1>>5)&31) : $rs2;
1203 $rs2 = oct($rs2) if ($rs2 =~ /^0/);
1205 foreach ($rs1,$rd) {
1206 return $ref if (!/%f([0-9]{1,2})/);
1209 return $ref if ($1&1);
1210 # re-encode for upper double register addressing
1215 return sprintf ".word\t0x%08x !%s",
1216 2<<30|$rd<<25|0x36<<19|$rs1<<14|$opf<<5|$rs2,
1224 my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
1226 my %aesopf = ( "fshiftorx" => 0x0b );
1228 $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";
1230 if (defined($opf=$aesopf{$mnemonic})) {
1231 foreach ($rs1,$rs2,$rs3,$rd) {
1232 return $ref if (!/%f([0-9]{1,2})/);
1235 return $ref if ($1&1);
1236 # re-encode for upper double register addressing
1241 return sprintf ".word\t0x%08x !%s",
1242 2<<30|$rd<<25|0x37<<19|$rs1<<14|$rs3<<9|$opf<<5|$rs2,
1249 foreach (split("\n",$code)) {
1250 s/\`([^\`]*)\`/eval $1/ge;
1252 s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;
1254 s
/\b(faes[^x]{3,4}x)\s+(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
1257 s
/\b([f][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
1258 &unfx3src
($1,$2,$3,$4,$5)
1260 s
/\b([fb][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
1263 s
/\b(alignaddr[l]*)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
1264 &unvis3
($1,$2,$3,$4)
# Flush and close STDOUT explicitly: buffered write errors only
# surface at close, and a truncated assembly file must abort the
# build with a diagnostic rather than pass silently.
1269 close STDOUT
or die "error closing STDOUT: $!";