# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [plus a shared 32-byte
# table]. There is no experimental performance data available yet.
# The only approximation that can be made at this point is based on
# code size. The inner loop is 32 instructions long and on a
# single-issue core should execute in <40 cycles. Having verified that
# gcc 3.4 didn't unroll the corresponding loop, this assembler loop
# body was found to be ~3x smaller than the compiler-generated one...
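#
# For reference, the per-nibble scheme looks as follows in C, modelled
# on gcm_gmult_4bit() in gcm128.c (a sketch only: the u128 type, the
# gmult_4bit name and the big-endian store loop are illustrative, not
# this module's interface):
#
#	#include <stdint.h>
#
#	typedef struct { uint64_t hi, lo; } u128;
#
#	static const uint16_t rem_4bit[16] = {	/* shared 32-byte table */
#		0x0000,0x1C20,0x3840,0x2460, 0x7080,0x6CA0,0x48C0,0x54E0,
#		0xE100,0xFD20,0xD940,0xC560, 0x9180,0x8DA0,0xA9C0,0xB5E0 };
#
#	/* Xi *= H, where Htable[i] = i*H fills 16*16 = 256 bytes */
#	static void gmult_4bit(uint8_t Xi[16], const u128 Htable[16])
#	{
#		u128 Z = Htable[Xi[15] & 0xf];	/* lowest nibble */
#		int off = 15, hi = 1;
#
#		for (;;) {			/* remaining 31 nibbles */
#			size_t nib, rem;
#
#			if (hi)			nib = Xi[off] >> 4;
#			else if (--off < 0)	break;
#			else			nib = Xi[off] & 0xf;
#			hi ^= 1;
#
#			rem  = (size_t)Z.lo & 0xf;	/* bits shifted out */
#			Z.lo = (Z.hi << 60) | (Z.lo >> 4);
#			Z.hi = (Z.hi >> 4) ^ ((uint64_t)rem_4bit[rem] << 48);
#			Z.hi ^= Htable[nib].hi;
#			Z.lo ^= Htable[nib].lo;
#		}
#		for (int i = 0; i < 16; i++)	/* store back, big-endian */
#			Xi[i] = (uint8_t)(i < 8 ? Z.hi >> (8*(7-i))
#						: Z.lo >> (8*(15-i)));
#	}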
# Rescheduling for dual-issue pipeline resulted in 8.5% improvement on
# Cortex A8 core and ~25 cycles per processed byte (which was observed
# to be ~3 times faster than gcc-generated code:-)
# Profiler-assisted and platform-specific optimization resulted in 7%
# improvement on Cortex A8 core and ~23.5 cycles per byte.
# Add NEON implementation featuring polynomial multiplication, i.e. no
# lookup tables involved. On Cortex A8 it was measured to process one
# byte in 15 cycles or 55% faster than integer-only code.
# Switch to the multiplication algorithm suggested in the paper
# referred to below and combine it with the reduction algorithm from
# the x86 module. Performance improvement over the previous version
# varies from 65% on Snapdragon S4 to 110% on Cortex A9. In absolute
# terms Cortex A8 processes one byte in 8.45 cycles, A9 - in 10.2,
# Snapdragon S4 - in 9.33.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
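#
# vmull.p8 multiplies eight 8-bit polynomial (carry-less) lane pairs
# at once; the paper shows how to build a 64x64-bit carry-less product
# from eight such instructions plus rotations, masks and xors. As a
# reference model only (a hedged sketch, not part of this module), the
# value being computed is what a bit-serial C loop would produce:
#
#	#include <stdint.h>
#
#	/* r[1]:r[0] = a*b in GF(2)[x], i.e. no carries between bits */
#	static void clmul64(uint64_t a, uint64_t b, uint64_t r[2])
#	{
#		r[0] = r[1] = 0;
#		for (int i = 0; i < 64; i++)
#			if ((b >> i) & 1) {
#				r[0] ^= a << i;
#				if (i) r[1] ^= a >> (64 - i);
#			}
#	}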
# ====================================================================
# Note about "528B" variant. In the ARM case it makes less sense to
# implement it, for the following reasons:
#
# - performance improvement won't be anywhere near 50%, because 128-
#   bit shift operation is neatly fused with 128-bit xor here, and
#   "528B" variant would eliminate only 4-5 instructions out of 32
#   in the inner loop (meaning that estimated improvement is ~15%);
# - ARM-based systems are often embedded ones and extra memory
#   consumption might be unappreciated (for so little improvement);
# Byte order [in]dependence. =========================================
#
# Caller is expected to maintain specific *dword* order in Htable,
# namely with *least* significant dword of 128-bit value at *lower*
# address. This differs completely from C code and has everything to
# do with ldm instruction and order in which dwords are "consumed" by
# algorithm. *Byte* order within these dwords in turn is whatever
# *native* byte order on current platform. See gcm128.c for working
# example...
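#
# For a 128-bit entry whose value is Z_high64:Z_low64, the expected
# layout is therefore (an illustrative C fragment; the names are not
# this module's):
#
#	uint64_t Htable_entry[2];
#	Htable_entry[0] = Z_low64;	/* least significant dword first */
#	Htable_entry[1] = Z_high64;	/* most significant dword last   */
#
# so that a single ldmia walks the dwords in the order the inner loop
# consumes them.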
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$Xi="r0";	# argument block
$Zll="r4";	# variables
################# r13 is stack pointer
################# r15 is program counter
$rem_4bit=$inp;	# used in gcm_gmult_4bit
for ($Zll,$Zlh,$Zhl,$Zhh) {
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
#elif defined(__ARMEB__)
	strb	$Tlh,[$Xi,#$i+2]
	strb	$Thl,[$Xi,#$i+1]
    $code.="\t".shift(@args)."\n";	# interleave caller-supplied instructions
#include "arm_arch.h"
.type	rem_4bit,%object
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
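@ each entry above is the table index multiplied carry-lessly by 0xE1
@ [the bit-reflected GCM polynomial tail] and shifted left by 5 bits:
@ the value xor-ed into the top of Z to repair the four bits shifted
@ out at the bottom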
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
	sub	$rem_4bit,$rem_4bit,#32	@ &rem_4bit
.size	rem_4bit_get,.-rem_4bit_get
.global	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
	add	$len,$inp,$len		@ $len to point at the end
	stmdb	sp!,{r3-r11,lr}		@ save $len/end too
	sub	r12,r12,#48		@ &rem_4bit
	ldmia	r12,{r4-r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4-r11}		@ ... to stack
	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
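	@ Z = (Z>>4) ^ Htbl[nhi]: the 128-bit right shift is folded into
	@ the xors below via the lsr#4/lsl#28 shifter operands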
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[sp,$nhi]		@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[sp,$nlo]		@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrplb	$nlo,[$inp,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrplb	$Tll,[$Xi,$cnt]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tlh,lsl#16	@ ^= rem_4bit[rem]
	ldr	$len,[sp,#32]		@ re-load $len/end
&Zsmash("cmp\t$inp,$len","ldrneb\t$nlo,[$inp,#15]");
	ldmia	sp!,{r4-r11,pc}
	ldmia	sp!,{r4-r11,lr}
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
.global	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
	stmdb	sp!,{r4-r11,lr}
	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[$rem_4bit,$nlo]	@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrplb	$nlo,[$Xi,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	ldmia	sp!,{r4-r11,pc}
	ldmia	sp!,{r4-r11,lr}
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));
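
# The code below emits one 64x64->128-bit carry-less multiplication as
# eight vmull.p8 byte-lane products of rotated inputs [the scheme from
# the Câmara et al. paper above]: the paired partial products Pi are
# masked, folded and slid to their byte offsets, then combined with
# D = A*B.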
	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
#if __ARM_MAX_ARCH__>=7
.global	gcm_init_neon
.type	gcm_init_neon,%function
	vld1.64		$IN#hi,[r1,:64]!	@ load H
	vld1.64		$IN#lo,[r1,:64]
	vshr.u64	$t0#lo,#63		@ t0=0xc2....01
	vshr.u64	$Hlo,$IN#lo,#63
	vshr.s8		$t1,#7			@ broadcast carry bit
	vorr		$IN#hi,$Hlo		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
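	@ i.e. H is premultiplied by x, with the 0xC2...01 reduction
	@ constant folded in when a bit is carried out of the top; this
	@ is the convention the reduction below [borrowed from the
	@ x86_64 module] expects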
.size	gcm_init_neon,.-gcm_init_neon

.global	gcm_gmult_neon
.type	gcm_gmult_neon,%function
	vld1.64		$IN#hi,[$Xi,:64]!	@ load Xi
	vld1.64		$IN#lo,[$Xi,:64]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing
.size	gcm_gmult_neon,.-gcm_gmult_neon
.global	gcm_ghash_neon
.type	gcm_ghash_neon,%function
	vld1.64		$Xl#hi,[$Xi,:64]!	@ load Xi
	vld1.64		$Xl#lo,[$Xi,:64]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing
	vld1.64		$IN#hi,[$inp]!		@ load inp
	vld1.64		$IN#lo,[$inp]!
	veor		$IN,$Xl			@ inp^=Xi
&clmul64x64	($Xl,$Hlo,"$IN#lo");	# H.lo·Xi.lo
	veor		$IN#lo,$IN#lo,$IN#hi	@ Karatsuba pre-processing
&clmul64x64	($Xm,$Hhl,"$IN#lo");	# (H.lo+H.hi)·(Xi.lo+Xi.hi)
&clmul64x64	($Xh,$Hhi,"$IN#hi");	# H.hi·Xi.hi
	veor		$Xm,$Xm,$Xl		@ Karatsuba post-processing
	veor		$Xm,$Xm,$Xh
	veor		$Xl#hi,$Xl#hi,$Xm#lo
	veor		$Xh#lo,$Xh#lo,$Xm#hi	@ Xh|Xl - 256-bit result
	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	$t1,$Xl,#57		@ 1st phase
	veor		$Xl#hi,$Xl#hi,$t2#lo	@
	veor		$Xh#lo,$Xh#lo,$t2#hi
	vshr.u64	$t2,$Xl,#1		@ 2nd phase
	vshr.u64	$Xl,$Xl,#1		@
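	@ the two phases fold the 256-bit product modulo the GHASH
	@ polynomial x^128+x^7+x^2+x+1 using only shifts and xors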
	vst1.64		$Xl#hi,[$Xi,:64]!	@ write out Xi
	vst1.64		$Xl#lo,[$Xi,:64]
.size	gcm_ghash_neon,.-gcm_ghash_neon
.asciz	"GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	# qN#lo/qN#hi denote the d-register halves of qN: d(2N) and d(2N+1)
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT;			# enforce flush