# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# Initial version was developed in tight cooperation with Ard Biesheuvel
# of Linaro from bits-n-pieces from other assembly modules. Just like
# aesv8-armx.pl this module supports both AArch32 and AArch64 execution modes.
#
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Cortex-A53	1.01		8.39
# Cortex-A57	1.17		7.61
#
# (*)	presented for reference/comparison purposes;
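#
# A brief sketch of what the 2x aggregation buys (this merely restates, in
# the file's own notation, the per-iteration identity used in gcm_ghash_v8
# below):
#
#	Xi+2 = [H*(Ii+1 + Xi+1)] mod P
#	     = [(H*Ii+1) + H^2*(Ii+Xi)] mod P
#
# i.e. two input blocks are folded with one multiplication by H and one by
# H^2, and only a single reduction modulo P is performed per pair of blocks.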
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
$Xi="x0";				# argument block

my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));

$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=<<___				if ($flavour !~ /64/);
################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with degrees of twisted H;
#		H is twisted to handle reverse bitness of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		data is opaque to the outside world (which allows the
#		code to be optimized independently);
#
.type	gcm_init_v8,%function
	vld1.64		{$t1},[x1]		@ load input H
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0]		@ store Htable[1..2]
.size	gcm_init_v8,.-gcm_init_v8
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
#
# input:	Xi - current hash value;
#		Htable - table precomputed in gcm_init_v8;
# output:	Xi - next hash value Xi;
#
.type	gcm_gmult_v8,%function
	vld1.64		{$t1},[$Xi]		@ load Xi
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
	vext.8		$IN,$t1,$t1,#8
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
.size	gcm_gmult_v8,.-gcm_gmult_v8
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#
# input:	table precomputed in gcm_init_v8;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes, which must be divisible by
#		the block size;
# output:	next hash value Xi;
#
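#
# A minimal C-side usage sketch (illustration only, not part of this module):
# the u8/u64/u128 typedefs and the ghash_update() helper are assumptions
# standing in for the caller's own definitions; only the three gcm_*_v8
# entry points are provided by this file.
#
#	#include <stddef.h>			/* size_t */
#
#	typedef unsigned char      u8;
#	typedef unsigned long long u64;
#	typedef struct { u64 hi, lo; } u128;	/* assumed layout; opaque to callers */
#
#	void gcm_init_v8 (u128 Htable[16], const u64 H[2]);
#	void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
#	void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16],
#	                  const u8 *inp, size_t len);
#
#	/* hypothetical helper: absorb len bytes (a multiple of the 16-byte
#	 * block size) into the running hash value Xi                       */
#	static void ghash_update(u64 Xi[2], const u64 H[2],
#	                         const u8 *inp, size_t len)
#	{
#		u128 Htable[16];
#
#		gcm_init_v8(Htable, H);		/* precompute twisted H, H^2 */
#		gcm_ghash_v8(Xi, Htable, inp, len);
#		/* gcm_gmult_v8(Xi, Htable) processes one block that has
#		 * already been XOR-ed into Xi, e.g. the final length block */
#	}
#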
.type	gcm_ghash_v8,%function
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ $inc is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
	# Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
	#	 [(H*Ii+1) + (H*Xi+1)] mod P =
	#	 [(H*Ii+1) + H^2*(Ii+Xi)] mod P
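	#
	# Roughly how the identity above maps onto the loop below: the H*Ii+1
	# product is developed in $Xln/$Xmn/$Xhn while H^2*(Ii+Xi) is developed
	# in $Xl/$Xm/$Xh (each split lo/middle/hi for Karatsuba); the two
	# 256-bit products are xor-accumulated and a single reduction by the
	# $xC2 constant then folds the result back to 128 bits.
	#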
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?
	vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64		{$t0},[$inp],$inc	@ load [rotated] I[i+2]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[i+3]
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$In,$t1,$t1,#8
	vext.8		$IN,$t0,$t0,#8
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there was at least 32 more bytes
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi
	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
.size	gcm_ghash_v8,.-gcm_ghash_v8
.asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
if ($flavour =~ /64/) {			######## 64-bit code
	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf	"ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;

    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vshr\.s/sshr\.s/o	or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;				# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
} else {				######## 32-bit code
	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;

	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # ARMv7 instructions are always encoded little-endian, hence the
	    # byte order below; the correct solution would be the .inst
	    # directive, but older assemblers don't implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo	or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo	or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)ret/$1bx\tlr/o;

close STDOUT; # enforce flush