#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# Initial version was developed in tight cooperation with Ard
# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
# other assembly modules. Just like aesv8-armx.pl this module
# supports both AArch32 and AArch64 execution modes.
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Cortex-A53	1.45		8.39
# Cortex-A57	2.22		7.61
#
# (*)	presented for reference/comparison purposes;
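#
# For reference, GHASH is a polynomial hash over GF(2^128) with
# reduction polynomial g(x) = x^128 + x^7 + x^2 + x + 1 and a
# bit-reflected element representation: each 16-byte block is xor-ed
# into the accumulator Xi, and the sum is then multiplied by the hash
# key H in that field. PMULL/PMULL2 provide the 64x64-bit carry-less
# multiplications these field operations are built from.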
$flavour = shift;
open STDOUT,">".shift;
31 $Xi="x0"; # argument block
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3,$H,$Hhl)=map("q$_",(8..14));

$code=<<___;
#include "arm_arch.h"

.text
___
$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=".fpu	neon\n.code	32\n"	if ($flavour !~ /64/);
$code.=<<___;
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	vld1.64		{$t1},[x1]		@ load H
	vext.8		$t0,$t2,$t0,#8		@ t0=0xc2....01
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vorr		$IN,$IN,$t3		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
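	@ In essence the one-bit fix-up that GHASH's reflected bit order
	@ would otherwise require on every 255-bit product is folded into
	@ H once at key setup: H is shifted left by one, with the carry
	@ bit broadcast and masked by the polynomial constant, so the
	@ per-block reduction can use the 0xc2....01 constant directly.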
.size	gcm_init_v8,.-gcm_init_v8
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	vld1.64		{$t1},[$Xi]		@ load Xi
	vld1.64		{$H},[$Htbl]		@ load twisted H
	veor		$Hhl,$Hhl,$H		@ Karatsuba pre-processing
.size	gcm_gmult_v8,.-gcm_gmult_v8
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
	vld1.64		{$H},[$Htbl]		@ load twisted H
	vext.8		$Xl,$Xl,$Xl,#8
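	@ vext.8 with #8 swaps the two 64-bit halves of a vector; Xi is
	@ kept in this half-swapped ("rotated") layout throughout the loop.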
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] inp
	veor		$Hhl,$Hhl,$H		@ Karatsuba pre-processing
	vext.8		$IN,$t1,$t1,#8
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t1,$t2		@ $t1 is rotated inp^Xi
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
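	@ Karatsuba: with H = H.hi:H.lo and Xi = Xi.hi:Xi.lo, the middle
	@ term H.hi·Xi.lo + H.lo·Xi.hi equals
	@ (H.lo+H.hi)·(Xi.lo+Xi.hi) + H.lo·Xi.lo + H.hi·Xi.hi
	@ (addition is xor here), so the 128x128-bit carry-less product
	@ costs three PMULLs instead of four.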
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] inp
	vpmull.p64	$t2,$Xl,$t3		@ 1st phase
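	@ The 256-bit product Xh:Xl is reduced modulo g(x) in two folds:
	@ roughly, each phase multiplies a 64-bit half by the 0xc2....01
	@ constant and xor-folds the result back into the remainder.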
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$IN,$t1,$t1,#8
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$t3
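	@ after this 2nd fold the result fits in 128 bits again and, once
	@ xor-combined with the remaining terms, becomes the new Xi.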
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
.size	gcm_ghash_v8,.-gcm_ghash_v8
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___
if ($flavour =~ /64/) {			######## 64-bit code
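    # The assembly above is written in 32-bit NEON syntax; for the
    # 64-bit flavour it is translated mechanically into AArch64
    # mnemonics and register names by the substitutions below.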
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
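    # e.g. unvmov("q2#lo,q1#hi") yields "ins v2.d[0],v1.d[1]".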
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vshr\.s/sshr\.s/o	or
	s/^(\s+)v/$1/o;			# strip off v prefix
	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;			# old->new style commentary
	# fix up remaining legacy suffixes
	s/\.[uis]?32//o		and s/\.16b/\.4s/go;
	m/\.p64/o		and s/\.16b/\.1q/o;	# 1st pmull argument
	m/l\.p64/o		and s/\.16b/\.1d/go;	# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o	and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
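	# e.g. "vpmull.p64 q0,q1,q2" has by this point become
	# "pmull.p64 v0.16b,v1.16b,v2.16b" and now ends up as
	# "pmull v0.1q,v1.1d,v2.1d", the AArch64 spelling.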
	print $_,"\n";
    }
} else {				######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
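    # e.g. unvdup32("q0,q1[3]") yields "vdup.32 q0,d3[1]".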
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # the assembler doesn't know PMULL, so the instruction word is
	    # emitted as raw bytes, low byte first, since ARMv7 instructions
	    # are always encoded little-endian. correct solution is to use
	    # .inst directive, but older assemblers don't implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }
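    # e.g. unvpmullp64("pmull","q0,q1,q2") yields
    # ".byte 0x04,0x0e,0xa2,0xf2 @ pmull q0,q1,q2".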
    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary
	# fix up remaining new-style suffixes
	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo				or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo		or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)ret/$1bx\tlr/o;
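	# e.g. "q8#hi" becomes "d17" (d-registers alias the halves of the
	# q-registers), and AArch64 "ret" becomes the classic "bx lr".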
	print $_,"\n";
    }
}

close STDOUT;				# enforce flush