2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # Initial support for Fujitsu SPARC64 X/X+ comprises minimally
20 # required key setup and single-block procedures.
24 # Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
25 # that the parallelizable nature of CBC decrypt and CTR is not utilized
26 # yet. CBC encrypt, on the other hand, is as good as it can possibly
27 # get, processing one byte in 4.1 cycles with a 128-bit key on SPARC64 X.
28 # This is ~6x faster than a pure software implementation...
32 # Switch from faligndata to fshiftorx, which makes it possible to omit
33 # alignaddr instructions and improves single-block and short-input
34 # performance with misaligned data.
# Redirect STDOUT to the requested output file. Use the three-argument
# form of open (the two-argument form with an interpolated filename is
# subject to mode injection) and fail loudly if the file cannot be opened.
open STDOUT, ">", $output or die "can't open $output: $!";
40 my ($inp,$out,$key,$rounds,$tmp,$mask) = map("%o$_",(0..5));
43 #include "sparc_arch.h"
45 #define LOCALS (STACK_BIAS+STACK_FRAME)
52 and $inp, 7, $tmp ! is input aligned?
54 ldd
[$key + 0], %f6 ! round
[0]
57 ld
[$key + 240], $rounds
60 add
%o7, .Linp_align
-1b
, %o7
63 ldd
[$inp + 0], %f0 ! load input
64 brz
,pt
$tmp, .Lenc_inp_aligned
67 ldd
[%o7 + $tmp], %f14 ! shift left params
69 fshiftorx
%f0, %f2, %f14, %f0
70 fshiftorx
%f2, %f4, %f14, %f2
73 ldd
[$key + 16], %f10 ! round
[1]
76 fxor
%f0, %f6, %f0 ! ^=round
[0]
78 ldd
[$key + 32], %f6 ! round
[2]
81 sub $rounds, 4, $rounds
85 faesencx
%f2, %f10, %f0
86 faesencx
%f4, %f12, %f2
92 faesencx
%f2, %f6, %f0
93 faesencx
%f4, %f8, %f2
97 brnz
,a
$rounds, .Loop_enc
98 sub $rounds, 2, $rounds
100 andcc
$out, 7, $tmp ! is output aligned?
103 srl
$mask, $tmp, $mask
108 faesencx
%f2, %f10, %f0
109 faesencx
%f4, %f12, %f2
110 ldd
[%o7 + $tmp], %f14 ! shift right params
113 faesenclx
%f2, %f6, %f0
114 faesenclx
%f4, %f8, %f2
116 bnz
,pn
%icc, .Lenc_out_unaligned
127 fshiftorx
%f0, %f0, %f14, %f4
128 fshiftorx
%f0, %f2, %f14, %f6
129 fshiftorx
%f2, %f2, %f14, %f8
131 stda
%f4, [$out + $mask]0xc0 ! partial store
133 stda
%f8, [$inp + $tmp]0xc0 ! partial store
136 .type aes_fx_encrypt
,#function
137 .size aes_fx_encrypt
,.-aes_fx_encrypt
139 .globl aes_fx_decrypt
142 and $inp, 7, $tmp ! is input aligned?
144 ldd
[$key + 0], %f6 ! round
[0]
147 ld
[$key + 240], $rounds
150 add
%o7, .Linp_align
-1b
, %o7
153 ldd
[$inp + 0], %f0 ! load input
154 brz
,pt
$tmp, .Ldec_inp_aligned
157 ldd
[%o7 + $tmp], %f14 ! shift left params
159 fshiftorx
%f0, %f2, %f14, %f0
160 fshiftorx
%f2, %f4, %f14, %f2
163 ldd
[$key + 16], %f10 ! round
[1]
164 ldd
[$key + 24], %f12
166 fxor
%f0, %f6, %f0 ! ^=round
[0]
168 ldd
[$key + 32], %f6 ! round
[2]
171 sub $rounds, 4, $rounds
175 faesdecx
%f2, %f10, %f0
176 faesdecx
%f4, %f12, %f2
177 ldd
[$key + 16], %f10
178 ldd
[$key + 24], %f12
182 faesdecx
%f2, %f6, %f0
183 faesdecx
%f4, %f8, %f2
187 brnz
,a
$rounds, .Loop_dec
188 sub $rounds, 2, $rounds
190 andcc
$out, 7, $tmp ! is output aligned?
193 srl
$mask, $tmp, $mask
198 faesdecx
%f2, %f10, %f0
199 faesdecx
%f4, %f12, %f2
200 ldd
[%o7 + $tmp], %f14 ! shift right params
203 faesdeclx
%f2, %f6, %f0
204 faesdeclx
%f4, %f8, %f2
206 bnz
,pn
%icc, .Ldec_out_unaligned
217 fshiftorx
%f0, %f0, %f14, %f4
218 fshiftorx
%f0, %f2, %f14, %f6
219 fshiftorx
%f2, %f2, %f14, %f8
221 stda
%f4, [$out + $mask]0xc0 ! partial store
223 stda
%f8, [$inp + $tmp]0xc0 ! partial store
226 .type aes_fx_decrypt
,#function
227 .size aes_fx_decrypt
,.-aes_fx_decrypt
231 my ($inp,$bits,$out,$tmp,$inc) = map("%o$_",(0..5));
233 .globl aes_fx_set_decrypt_key
235 aes_fx_set_decrypt_key
:
240 .type aes_fx_set_decrypt_key
,#function
241 .size aes_fx_set_decrypt_key
,.-aes_fx_set_decrypt_key
243 .globl aes_fx_set_encrypt_key
245 aes_fx_set_encrypt_key
:
255 add
%o7, .Linp_align
-1b
, %o7
257 ldd
[%o7 + $tmp], %f10 ! shift left params
267 brz
,pt
$tmp, .L256aligned
271 fshiftorx
%f0, %f2, %f10, %f0
272 fshiftorx
%f2, %f4, %f10, %f2
273 fshiftorx
%f4, %f6, %f10, %f4
274 fshiftorx
%f6, %f8, %f10, %f6
278 and $inc, `14*16`, $tmp
279 st
$bits, [$out + 240] ! store rounds
280 add
$out, $tmp, $out ! start
or end of key schedule
281 sllx
$inc, 4, $inc ! 16 or -16
283 for ($i=0; $i<6; $i++) {
286 faeskeyx
%f6, `0x10+$i`, %f0
289 faeskeyx
%f0, 0x00, %f2
291 faeskeyx
%f2, 0x01, %f4
294 faeskeyx
%f4, 0x00, %f6
299 faeskeyx
%f6, `0x10+$i`, %f0
302 faeskeyx
%f0, 0x00, %f2
309 xor %o0, %o0, %o0 ! return 0
313 brz
,pt
$tmp, .L192aligned
317 fshiftorx
%f0, %f2, %f10, %f0
318 fshiftorx
%f2, %f4, %f10, %f2
319 fshiftorx
%f4, %f6, %f10, %f4
323 and $inc, `12*16`, $tmp
324 st
$bits, [$out + 240] ! store rounds
325 add
$out, $tmp, $out ! start
or end of key schedule
326 sllx
$inc, 4, $inc ! 16 or -16
328 for ($i=0; $i<8; $i+=2) {
331 faeskeyx
%f4, `0x10+$i`, %f0
334 faeskeyx
%f0, 0x00, %f2
336 faeskeyx
%f2, 0x00, %f4
339 faeskeyx
%f4, `0x10+$i+1`, %f0
341 faeskeyx
%f0, 0x00, %f2
345 $code.=<<___
if ($i<6);
346 faeskeyx
%f2, 0x00, %f4
353 xor %o0, %o0, %o0 ! return 0
357 brz
,pt
$tmp, .L128aligned
361 fshiftorx
%f0, %f2, %f10, %f0
362 fshiftorx
%f2, %f4, %f10, %f2
366 and $inc, `10*16`, $tmp
367 st
$bits, [$out + 240] ! store rounds
368 add
$out, $tmp, $out ! start
or end of key schedule
369 sllx
$inc, 4, $inc ! 16 or -16
371 for ($i=0; $i<10; $i++) {
374 faeskeyx
%f2, `0x10+$i`, %f0
377 faeskeyx
%f0, 0x00, %f2
384 xor %o0, %o0, %o0 ! return 0
385 .type aes_fx_set_encrypt_key
,#function
386 .size aes_fx_set_encrypt_key
,.-aes_fx_set_encrypt_key
390 my ($inp,$out,$len,$key,$ivp,$dir) = map("%i$_",(0..5));
391 my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
392 my ($iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
393 = map("%f$_",grep { !($_ & 1) } (16 .. 62));
394 my ($ileft,$iright) = ($ialign,$oalign);
397 .globl aes_fx_cbc_encrypt
400 save
%sp, -STACK_FRAME
-16, %sp
404 brz
,pn
$len, .Lcbc_no_data
405 sll
$ialign, 3, $ileft
408 add
%o7, .Linp_align
-1b
, %o7
410 ld
[$key + 240], $rounds
412 ld
[$ivp + 0], %f0 ! load ivec
415 sll
$oalign, 3, $mask
419 sll
$rounds, 4, $rounds
420 add
$rounds, $key, $end
421 ldd
[$key + 0], $r0hi ! round
[0]
422 ldd
[$key + 8], $r0lo
426 ldd
[$end + 0], $rlhi ! round
[last]
427 ldd
[$end + 8], $rllo
431 ldd
[$key + 16], %f10 ! round
[1]
432 ldd
[$key + 24], %f12
434 ldd
[%o7 + $ileft], $fshift ! shift left params
436 ldd
[$inp - 16], $in0 ! load input
438 ldda
[$inp]0x82, $intail ! non
-faulting load
439 brz
$dir, .Lcbc_decrypt
440 add
$inp, $inc, $inp ! inp
+=16
442 fxor
$r0hi, %f0, %f0 ! ivec
^=round
[0]
444 fshiftorx
$in0, $in1, $fshift, $in0
445 fshiftorx
$in1, $intail, $fshift, $in1
449 fxor
$in0, %f0, %f0 ! inp
^ivec
^round
[0]
451 ldd
[$key + 32], %f6 ! round
[2]
454 sub $rounds, 16*6, $inner
458 faesencx
%f2, %f10, %f0
459 faesencx
%f4, %f12, %f2
460 ldd
[$end + 16], %f10
461 ldd
[$end + 24], %f12
465 faesencx
%f2, %f6, %f0
466 faesencx
%f4, %f8, %f2
470 brnz
,a
$inner, .Lcbc_enc
471 sub $inner, 16*2, $inner
474 faesencx
%f2, %f10, %f0
475 faesencx
%f4, %f12, %f2
476 ldd
[$end + 16], %f10 ! round
[last-1]
477 ldd
[$end + 24], %f12
481 ldd
[$inp - 8], $in1 ! load
next input block
482 ldda
[$inp]0x82, $intail ! non
-faulting load
483 add
$inp, $inc, $inp ! inp
+=16
486 faesencx
%f2, %f6, %f0
487 faesencx
%f4, %f8, %f2
489 fshiftorx
$in0, $in1, $fshift, $in0
490 fshiftorx
$in1, $intail, $fshift, $in1
493 faesencx
%f2, %f10, %f0
494 faesencx
%f4, %f12, %f2
495 ldd
[$key + 16], %f10 ! round
[1]
496 ldd
[$key + 24], %f12
498 fxor
$r0hi, $in0, $in0 ! inp
^=round
[0]
499 fxor
$r0lo, $in1, $in1
502 faesenclx
%f2, $rlhi, %f0
503 faesenclx
%f4, $rllo, %f2
505 brnz
,pn
$oalign, .Lcbc_enc_unaligned_out
512 brnz
,a
$len, .Loop_cbc_enc
515 st
%f0, [$ivp + 0] ! output ivec
525 .Lcbc_enc_unaligned_out
:
526 ldd
[%o7 + $mask], $fshift ! shift right params
528 srl
$mask, $oalign, $mask
529 sub %g0, $ileft, $iright
531 fshiftorx
%f0, %f0, $fshift, %f6
532 fshiftorx
%f0, %f2, $fshift, %f8
534 stda
%f6, [$out + $mask]0xc0 ! partial store
535 orn
%g0, $mask, $mask
538 brz
$len, .Lcbc_enc_unaligned_out_done
540 b
.Loop_cbc_enc_unaligned_out
544 .Loop_cbc_enc_unaligned_out
:
546 fxor
$in0, %f0, %f0 ! inp
^ivec
^round
[0]
548 ldd
[$key + 32], %f6 ! round
[2]
552 faesencx
%f2, %f10, %f0
553 faesencx
%f4, %f12, %f2
554 ldd
[$key + 48], %f10 ! round
[3]
555 ldd
[$key + 56], %f12
559 brz
$ileft, .Lcbc_enc_aligned_inp
563 sllx
%o0, $ileft, %o0
564 srlx
%o1, $iright, %g1
565 sllx
%o1, $ileft, %o1
567 srlx
%o2, $iright, %o2
570 .Lcbc_enc_aligned_inp
:
572 faesencx
%f2, %f6, %f0
573 faesencx
%f4, %f8, %f2
574 ldd
[$key + 64], %f6 ! round
[4]
577 sub $rounds, 16*8, $inner
579 stx
%o0, [%sp + LOCALS
+ 0]
580 stx
%o1, [%sp + LOCALS
+ 8]
581 add
$inp, $inc, $inp ! inp
+=16
586 faesencx
%f2, %f10, %f0
587 faesencx
%f4, %f12, %f2
588 ldd
[$end + 16], %f10
589 ldd
[$end + 24], %f12
593 faesencx
%f2, %f6, %f0
594 faesencx
%f4, %f8, %f2
598 brnz
,a
$inner, .Lcbc_enc_unaligned
599 sub $inner, 16*2, $inner
602 faesencx
%f2, %f10, %f0
603 faesencx
%f4, %f12, %f2
604 ldd
[$end + 16], %f10 ! round
[last-1]
605 ldd
[$end + 24], %f12
608 faesencx
%f2, %f6, %f0
609 faesencx
%f4, %f8, %f2
611 ldd
[%sp + LOCALS
+ 0], $in0
612 ldd
[%sp + LOCALS
+ 8], $in1
615 faesencx
%f2, %f10, %f0
616 faesencx
%f4, %f12, %f2
617 ldd
[$key + 16], %f10 ! round
[1]
618 ldd
[$key + 24], %f12
620 fxor
$r0hi, $in0, $in0 ! inp
^=round
[0]
621 fxor
$r0lo, $in1, $in1
624 faesenclx
%f2, $rlhi, %f0
625 faesenclx
%f4, $rllo, %f2
627 fshiftorx
$outhead, %f0, $fshift, %f6
628 fshiftorx
%f0, %f2, $fshift, %f8
633 brnz
,a
$len, .Loop_cbc_enc_unaligned_out
636 .Lcbc_enc_unaligned_out_done
:
637 fshiftorx
%f2, %f2, $fshift, %f8
638 stda
%f8, [$out + $mask]0xc0 ! partial store
640 st
%f0, [$ivp + 0] ! output ivec
650 fshiftorx
$in0, $in1, $fshift, $in0
651 fshiftorx
$in1, $intail, $fshift, $in1
656 fxor
$in0, $r0hi, %f0 ! inp
^round
[0]
657 fxor
$in1, $r0lo, %f2
658 ldd
[$key + 32], %f6 ! round
[2]
661 sub $rounds, 16*6, $inner
665 faesdecx
%f2, %f10, %f0
666 faesdecx
%f4, %f12, %f2
667 ldd
[$end + 16], %f10
668 ldd
[$end + 24], %f12
672 faesdecx
%f2, %f6, %f0
673 faesdecx
%f4, %f8, %f2
677 brnz
,a
$inner, .Lcbc_dec
678 sub $inner, 16*2, $inner
681 faesdecx
%f2, %f10, %f0
682 faesdecx
%f4, %f12, %f2
683 ldd
[$end + 16], %f10 ! round
[last-1]
684 ldd
[$end + 24], %f12
687 faesdecx
%f2, %f6, %f0
688 faesdecx
%f4, %f8, %f2
689 fxor
$iv0, $rlhi, %f6 ! ivec
^round
[last]
690 fxor
$iv1, $rllo, %f8
696 ldd
[$inp - 8], $in1 ! load
next input block
697 ldda
[$inp]0x82, $intail ! non
-faulting load
698 add
$inp, $inc, $inp ! inp
+=16
701 faesdecx
%f2, %f10, %f0
702 faesdecx
%f4, %f12, %f2
703 ldd
[$key + 16], %f10 ! round
[1]
704 ldd
[$key + 24], %f12
706 fshiftorx
$in0, $in1, $fshift, $in0
707 fshiftorx
$in1, $intail, $fshift, $in1
710 faesdeclx
%f2, %f6, %f0
711 faesdeclx
%f4, %f8, %f2
713 brnz
,pn
$oalign, .Lcbc_dec_unaligned_out
720 brnz
,a
$len, .Loop_cbc_dec
723 st
$iv0, [$ivp + 0] ! output ivec
724 st
$iv0#lo, [$ivp + 4]
726 st
$iv1#lo, [$ivp + 12]
732 .Lcbc_dec_unaligned_out
:
733 ldd
[%o7 + $mask], $fshift ! shift right params
735 srl
$mask, $oalign, $mask
736 sub %g0, $ileft, $iright
738 fshiftorx
%f0, %f0, $fshift, %f6
739 fshiftorx
%f0, %f2, $fshift, %f8
741 stda
%f6, [$out + $mask]0xc0 ! partial store
742 orn
%g0, $mask, $mask
745 brz
$len, .Lcbc_dec_unaligned_out_done
747 b
.Loop_cbc_dec_unaligned_out
751 .Loop_cbc_dec_unaligned_out
:
753 fxor
$in0, $r0hi, %f0 ! inp
^round
[0]
754 fxor
$in1, $r0lo, %f2
755 ldd
[$key + 32], %f6 ! round
[2]
759 faesdecx
%f2, %f10, %f0
760 faesdecx
%f4, %f12, %f2
761 ldd
[$key + 48], %f10 ! round
[3]
762 ldd
[$key + 56], %f12
766 brz
$ileft, .Lcbc_dec_aligned_inp
770 sllx
%o0, $ileft, %o0
771 srlx
%o1, $iright, %g1
772 sllx
%o1, $ileft, %o1
774 srlx
%o2, $iright, %o2
777 .Lcbc_dec_aligned_inp
:
779 faesdecx
%f2, %f6, %f0
780 faesdecx
%f4, %f8, %f2
781 ldd
[$key + 64], %f6 ! round
[4]
784 sub $rounds, 16*8, $inner
786 stx
%o0, [%sp + LOCALS
+ 0]
787 stx
%o1, [%sp + LOCALS
+ 8]
788 add
$inp, $inc, $inp ! inp
+=16
793 faesdecx
%f2, %f10, %f0
794 faesdecx
%f4, %f12, %f2
795 ldd
[$end + 16], %f10
796 ldd
[$end + 24], %f12
800 faesdecx
%f2, %f6, %f0
801 faesdecx
%f4, %f8, %f2
805 brnz
,a
$inner, .Lcbc_dec_unaligned
806 sub $inner, 16*2, $inner
809 faesdecx
%f2, %f10, %f0
810 faesdecx
%f4, %f12, %f2
811 ldd
[$end + 16], %f10 ! round
[last-1]
812 ldd
[$end + 24], %f12
815 faesdecx
%f2, %f6, %f0
816 faesdecx
%f4, %f8, %f2
818 fxor
$iv0, $rlhi, %f6 ! ivec
^round
[last]
819 fxor
$iv1, $rllo, %f8
822 ldd
[%sp + LOCALS
+ 0], $in0
823 ldd
[%sp + LOCALS
+ 8], $in1
826 faesdecx
%f2, %f10, %f0
827 faesdecx
%f4, %f12, %f2
828 ldd
[$key + 16], %f10 ! round
[1]
829 ldd
[$key + 24], %f12
832 faesdeclx
%f2, %f6, %f0
833 faesdeclx
%f4, %f8, %f2
835 fshiftorx
$outhead, %f0, $fshift, %f6
836 fshiftorx
%f0, %f2, $fshift, %f8
841 brnz
,a
$len, .Loop_cbc_dec_unaligned_out
844 .Lcbc_dec_unaligned_out_done
:
845 fshiftorx
%f2, %f2, $fshift, %f8
846 stda
%f8, [$out + $mask]0xc0 ! partial store
848 st
$iv0, [$ivp + 0] ! output ivec
849 st
$iv0#lo, [$ivp + 4]
851 st
$iv1#lo, [$ivp + 12]
855 .type aes_fx_cbc_encrypt
,#function
856 .size aes_fx_cbc_encrypt
,.-aes_fx_cbc_encrypt
860 my ($inp,$out,$len,$key,$ivp) = map("%i$_",(0..5));
861 my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
862 my ($ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
863 = map("%f$_",grep { !($_ & 1) } (16 .. 62));
864 my ($ileft,$iright) = ($ialign, $oalign);
868 .globl aes_fx_ctr32_encrypt_blocks
870 aes_fx_ctr32_encrypt_blocks
:
871 save
%sp, -STACK_FRAME
-16, %sp
875 brz
,pn
$len, .Lctr32_no_data
876 sll
$ialign, 3, $ileft
879 add
%o7, .Linp_align
- .Lpic
, %o7
881 ld
[$key + 240], $rounds
883 ld
[$ivp + 0], $ctr0 ! load counter
885 ld
[$ivp + 4], $ctr0#lo
886 sll
$oalign, 3, $mask
888 ld
[$ivp + 12], $ctr1#lo
889 ldd
[%o7 + 128], $one
891 sll
$rounds, 4, $rounds
892 add
$rounds, $key, $end
893 ldd
[$key + 0], $r0hi ! round
[0]
894 ldd
[$key + 8], $r0lo
898 ldd
[$key + 16], %f10 ! round
[1]
899 ldd
[$key + 24], %f12
903 ldd
[$end + 0], $rlhi ! round
[last]
904 ldd
[$end + 8], $rllo
906 ldd
[%o7 + $ileft], $fshift ! shiftleft params
908 ldd
[$inp - 16], $in0 ! load input
910 ldda
[$inp]0x82, $intail ! non
-faulting load
911 add
$inp, $inc, $inp ! inp
+=16
913 fshiftorx
$in0, $in1, $fshift, $in0
914 fshiftorx
$in1, $intail, $fshift, $in1
917 fxor
$ctr0, $r0hi, %f0 ! counter
^round
[0]
918 fxor
$ctr1, $r0lo, %f2
919 ldd
[$key + 32], %f6 ! round
[2]
922 sub $rounds, 16*6, $inner
926 faesencx
%f2, %f10, %f0
927 faesencx
%f4, %f12, %f2
928 ldd
[$end + 16], %f10
929 ldd
[$end + 24], %f12
933 faesencx
%f2, %f6, %f0
934 faesencx
%f4, %f8, %f2
938 brnz
,a
$inner, .Lctr32_enc
939 sub $inner, 16*2, $inner
942 faesencx
%f2, %f10, %f0
943 faesencx
%f4, %f12, %f2
944 ldd
[$end + 16], %f10 ! round
[last-1]
945 ldd
[$end + 24], %f12
948 faesencx
%f2, %f6, %f0
949 faesencx
%f4, %f8, %f2
950 fxor
$in0, $rlhi, %f6 ! inp
^round
[last]
951 fxor
$in1, $rllo, %f8
955 ldd
[$inp - 8], $in1 ! load
next input block
956 ldda
[$inp]0x82, $intail ! non
-faulting load
957 add
$inp, $inc, $inp ! inp
+=16
960 faesencx
%f2, %f10, %f0
961 faesencx
%f4, %f12, %f2
962 ldd
[$key + 16], %f10 ! round
[1]
963 ldd
[$key + 24], %f12
965 fshiftorx
$in0, $in1, $fshift, $in0
966 fshiftorx
$in1, $intail, $fshift, $in1
967 fpadd32
$ctr1, $one, $ctr1 ! increment counter
970 faesenclx
%f2, %f6, %f0
971 faesenclx
%f4, %f8, %f2
973 brnz
,pn
$oalign, .Lctr32_unaligned_out
980 brnz
,a
$len, .Loop_ctr32
988 .Lctr32_unaligned_out
:
989 ldd
[%o7 + $mask], $fshift ! shift right params
991 srl
$mask, $oalign, $mask
992 sub %g0, $ileft, $iright
994 fshiftorx
%f0, %f0, $fshift, %f6
995 fshiftorx
%f0, %f2, $fshift, %f8
997 stda
%f6, [$out + $mask]0xc0 ! partial store
998 orn
%g0, $mask, $mask
1001 brz
$len, .Lctr32_unaligned_out_done
1003 b
.Loop_ctr32_unaligned_out
1007 .Loop_ctr32_unaligned_out
:
1009 fxor
$ctr0, $r0hi, %f0 ! counter
^round
[0]
1010 fxor
$ctr1, $r0lo, %f2
1011 ldd
[$key + 32], %f6 ! round
[2]
1012 ldd
[$key + 40], %f8
1015 faesencx
%f2, %f10, %f0
1016 faesencx
%f4, %f12, %f2
1017 ldd
[$key + 48], %f10 ! round
[3]
1018 ldd
[$key + 56], %f12
1020 ldx
[$inp - 16], %o0
1022 brz
$ileft, .Lctr32_aligned_inp
1026 sllx
%o0, $ileft, %o0
1027 srlx
%o1, $iright, %g1
1028 sllx
%o1, $ileft, %o1
1030 srlx
%o2, $iright, %o2
1033 .Lctr32_aligned_inp
:
1035 faesencx
%f2, %f6, %f0
1036 faesencx
%f4, %f8, %f2
1037 ldd
[$key + 64], %f6 ! round
[4]
1038 ldd
[$key + 72], %f8
1040 sub $rounds, 16*8, $inner
1042 stx
%o0, [%sp + LOCALS
+ 0]
1043 stx
%o1, [%sp + LOCALS
+ 8]
1044 add
$inp, $inc, $inp ! inp
+=16
1047 .Lctr32_enc_unaligned
:
1049 faesencx
%f2, %f10, %f0
1050 faesencx
%f4, %f12, %f2
1051 ldd
[$end + 16], %f10
1052 ldd
[$end + 24], %f12
1056 faesencx
%f2, %f6, %f0
1057 faesencx
%f4, %f8, %f2
1061 brnz
,a
$inner, .Lctr32_enc_unaligned
1062 sub $inner, 16*2, $inner
1065 faesencx
%f2, %f10, %f0
1066 faesencx
%f4, %f12, %f2
1067 ldd
[$end + 16], %f10 ! round
[last-1]
1068 ldd
[$end + 24], %f12
1069 fpadd32
$ctr1, $one, $ctr1 ! increment counter
1072 faesencx
%f2, %f6, %f0
1073 faesencx
%f4, %f8, %f2
1074 fxor
$in0, $rlhi, %f6 ! inp
^round
[last]
1075 fxor
$in1, $rllo, %f8
1076 ldd
[%sp + LOCALS
+ 0], $in0
1077 ldd
[%sp + LOCALS
+ 8], $in1
1080 faesencx
%f2, %f10, %f0
1081 faesencx
%f4, %f12, %f2
1082 ldd
[$key + 16], %f10 ! round
[1]
1083 ldd
[$key + 24], %f12
1086 faesenclx
%f2, %f6, %f0
1087 faesenclx
%f4, %f8, %f2
1089 fshiftorx
$outhead, %f0, $fshift, %f6
1090 fshiftorx
%f0, %f2, $fshift, %f8
1095 brnz
,a
$len, .Loop_ctr32_unaligned_out
1098 .Lctr32_unaligned_out_done
:
1099 fshiftorx
%f2, %f2, $fshift, %f8
1100 stda
%f8, [$out + $mask]0xc0 ! partial store
1104 .type aes_fx_ctr32_encrypt_blocks
,#function
1105 .size aes_fx_ctr32_encrypt_blocks
,.-aes_fx_ctr32_encrypt_blocks
1108 .Linp_align
: ! fshiftorx parameters
for left
shift toward
%rs1
1109 .byte
0, 0, 64, 0, 0, 64, 0, -64
1110 .byte
0, 0, 56, 8, 0, 56, 8, -56
1111 .byte
0, 0, 48, 16, 0, 48, 16, -48
1112 .byte
0, 0, 40, 24, 0, 40, 24, -40
1113 .byte
0, 0, 32, 32, 0, 32, 32, -32
1114 .byte
0, 0, 24, 40, 0, 24, 40, -24
1115 .byte
0, 0, 16, 48, 0, 16, 48, -16
1116 .byte
0, 0, 8, 56, 0, 8, 56, -8
1117 .Lout_align
: ! fshiftorx parameters
for right
shift toward
%rs2
1118 .byte
0, 0, 0, 64, 0, 0, 64, 0
1119 .byte
0, 0, 8, 56, 0, 8, 56, -8
1120 .byte
0, 0, 16, 48, 0, 16, 48, -16
1121 .byte
0, 0, 24, 40, 0, 24, 40, -24
1122 .byte
0, 0, 32, 32, 0, 32, 32, -32
1123 .byte
0, 0, 40, 24, 0, 40, 24, -40
1124 .byte
0, 0, 48, 16, 0, 48, 16, -48
1125 .byte
0, 0, 56, 8, 0, 56, 8, -56
1128 .asciz
"AES for Fujitsu SPARC64 X, CRYPTOGAMS by <appro\@openssl.org>"
1132 # The purpose of these subroutines is to explicitly encode VIS instructions,
1133 # so that one can compile the module without having to specify VIS
1134 # extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
1135 # The idea is to reserve the option of producing a "universal" binary and
1136 # letting the programmer detect at run-time whether the current CPU is
1137 # VIS capable.
1138 my ($mnemonic,$rs1,$rs2,$rd)=@_;
1140 my %visopf = ( "faligndata" => 0x048,
1141 "bshuffle" => 0x04c,
1146 $ref = "$mnemonic\t$rs1,$rs2,$rd";
1148 if ($opf=$visopf{$mnemonic}) {
1149 foreach ($rs1,$rs2,$rd) {
1150 return $ref if (!/%f([0-9]{1,2})/);
1153 return $ref if ($1&1);
1154 # re-encode for upper double register addressing
1159 return sprintf ".word\t0x%08x !%s",
1160 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
1168 my ($mnemonic,$rs1,$rs2,$rd)=@_;
1169 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
1171 my %visopf = ( "alignaddr" => 0x018,
1173 "alignaddrl" => 0x01a );
1175 $ref = "$mnemonic\t$rs1,$rs2,$rd";
1177 if ($opf=$visopf{$mnemonic}) {
1178 foreach ($rs1,$rs2,$rd) {
1179 return $ref if (!/%([goli])([0-9])/);
1183 return sprintf ".word\t0x%08x !%s",
1184 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
1192 my ($mnemonic,$rs1,$rs2,$rd)=@_;
1194 my %aesopf = ( "faesencx" => 0x90,
1196 "faesenclx" => 0x92,
1197 "faesdeclx" => 0x93,
1198 "faeskeyx" => 0x94 );
1200 $ref = "$mnemonic\t$rs1,$rs2,$rd";
1202 if (defined($opf=$aesopf{$mnemonic})) {
1203 $rs2 = ($rs2 =~ /%f([0-6]*[02468])/) ?
(($1|$1>>5)&31) : $rs2;
1204 $rs2 = oct($rs2) if ($rs2 =~ /^0/);
1206 foreach ($rs1,$rd) {
1207 return $ref if (!/%f([0-9]{1,2})/);
1210 return $ref if ($1&1);
1211 # re-encode for upper double register addressing
1216 return sprintf ".word\t0x%08x !%s",
1217 2<<30|$rd<<25|0x36<<19|$rs1<<14|$opf<<5|$rs2,
1225 my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
1227 my %aesopf = ( "fshiftorx" => 0x0b );
1229 $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";
1231 if (defined($opf=$aesopf{$mnemonic})) {
1232 foreach ($rs1,$rs2,$rs3,$rd) {
1233 return $ref if (!/%f([0-9]{1,2})/);
1236 return $ref if ($1&1);
1237 # re-encode for upper double register addressing
1242 return sprintf ".word\t0x%08x !%s",
1243 2<<30|$rd<<25|0x37<<19|$rs1<<14|$rs3<<9|$opf<<5|$rs2,
1250 foreach (split("\n",$code)) {
1251 s/\`([^\`]*)\`/eval $1/ge;
1253 s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;
1255 s
/\b(faes[^x]{3,4}x)\s+(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
1258 s
/\b([f][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
1259 &unfx3src
($1,$2,$3,$4,$5)
1261 s
/\b([fb][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
1264 s
/\b(alignaddr[l]*)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
1265 &unvis3
($1,$2,$3,$4)