# Source provenance: git.ipfire.org mirror of thirdparty/openssl.git,
# blob 921f44eef042e21d4ad11d9ca2c9ffba38179bd0
# crypto/modes/asm/aesni-gcm-x86_64.pl
1 #! /usr/bin/env perl
2 # Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 #
18 # AES-NI-CTR+GHASH stitch.
19 #
20 # February 2013
21 #
22 # OpenSSL GCM implementation is organized in such way that its
23 # performance is rather close to the sum of its streamed components,
24 # in the context parallelized AES-NI CTR and modulo-scheduled
25 # PCLMULQDQ-enabled GHASH. Unfortunately, as no stitch implementation
26 # was observed to perform significantly better than the sum of the
27 # components on contemporary CPUs, the effort was deemed impossible to
28 # justify. This module is based on combination of Intel submissions,
29 # [1] and [2], with MOVBE twist suggested by Ilya Albrekht and Max
30 # Locktyukhin of Intel Corp. who verified that it reduces shuffles
31 # pressure with notable relative improvement, achieving 1.0 cycle per
32 # byte processed with 128-bit key on Haswell processor, 0.74 - on
33 # Broadwell, 0.63 - on Skylake... [Mentioned results are raw profiled
34 # measurements for favourable packet size, one divisible by 96.
35 # Applications using the EVP interface will observe a few percent
36 # worse performance.]
37 #
38 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
39 # [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
40
# Command-line handling: perlasm scripts take an output "flavour"
# (elf, macosx, mingw64, nasm, masm, ...) and an output file name;
# a single dotted argument is treated as the output file only.
41 $flavour = shift;
42 $output = shift;
43 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
44
45 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
46
# Locate the x86_64 perlasm translator relative to this script.
47 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
48 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
49 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
50 die "can't locate x86_64-xlate.pl";
51
# Probe the toolchain for AVX support; $avx ends up 0 (none),
# 1 (AVX-capable) or 2 (AVX2-capable). The stitched code below is
# emitted only when $avx>1; otherwise stubs are generated.
52 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
53 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
54 $avx = ($1>=2.20) + ($1>=2.22);
55 }
56
57 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
58 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
59 $avx = ($1>=2.09) + ($1>=2.10);
60 }
61
62 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
63 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
64 $avx = ($1>=10) + ($1>=11);
65 }
66
67 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
68 $avx = ($2>=3.0) + ($2>3.0);
69 }
70
# Pipe all generated code through the translator. Fail loudly if the
# translator cannot be started instead of silently emitting nothing.
71 open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
72 *STDOUT=*OUT;
73
74 if ($avx>1) {{{
75
# SysV AMD64 argument registers for the 6-argument entry points:
# (inp, out, len, key, ivp, Xip).
76 ($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
77
# GHASH working set: scratch ($Ii,$T1,$T2), hash key power ($Hkey),
# Karatsuba partial products ($Z0-$Z3) and the hash accumulator ($Xi).
78 ($Ii,$T1,$T2,$Hkey,
79 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));
80
# Six parallel AES-CTR blocks in flight plus the current round key.
81 ($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));
82
# GPR roles: 32-bit CTR word, AES round count, byte count processed
# (return value), constants base, input cursor, input end sentinel.
83 ($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");
84
# _aesni_ctr32_ghash_6x: the stitched inner loop. Each iteration
# encrypts six counter blocks with AES-NI while folding six previous
# ciphertext blocks (staged on the stack) into the GHASH accumulator
# with a modulo-scheduled Karatsuba pclmulqdq schedule — 96 bytes per
# pass. The counter low word is kept in $counter; on 32-bit counter
# wrap .Lhandle_ctr32 recomputes the block counters via byte-swapped
# dword adds. Clobbers %r12/%r13 and the whole xmm bank; abi-omnipotent
# (no stack frame of its own, callers provide SEH coverage).
85 $code=<<___;
86 .text
87
88 .type _aesni_ctr32_ghash_6x,\@abi-omnipotent
89 .align 32
90 _aesni_ctr32_ghash_6x:
91 vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
92 sub \$6,$len
93 vpxor $Z0,$Z0,$Z0 # $Z0 = 0
94 vmovdqu 0x00-0x80($key),$rndkey
95 vpaddb $T2,$T1,$inout1
96 vpaddb $T2,$inout1,$inout2
97 vpaddb $T2,$inout2,$inout3
98 vpaddb $T2,$inout3,$inout4
99 vpaddb $T2,$inout4,$inout5
100 vpxor $rndkey,$T1,$inout0
101 vmovdqu $Z0,16+8(%rsp) # "$Z3" = 0
102 jmp .Loop6x
103
104 .align 32
105 .Loop6x:
106 add \$`6<<24`,$counter
107 jc .Lhandle_ctr32 # discard $inout[1-5]?
108 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
109 vpaddb $T2,$inout5,$T1 # next counter value
110 vpxor $rndkey,$inout1,$inout1
111 vpxor $rndkey,$inout2,$inout2
112
113 .Lresume_ctr32:
114 vmovdqu $T1,($ivp) # save next counter value
115 vpclmulqdq \$0x10,$Hkey,$Z3,$Z1
116 vpxor $rndkey,$inout3,$inout3
117 vmovups 0x10-0x80($key),$T2 # borrow $T2 for $rndkey
118 vpclmulqdq \$0x01,$Hkey,$Z3,$Z2
119 xor %r12,%r12
120 cmp $in0,$end0
121
122 vaesenc $T2,$inout0,$inout0
123 vmovdqu 0x30+8(%rsp),$Ii # I[4]
124 vpxor $rndkey,$inout4,$inout4
125 vpclmulqdq \$0x00,$Hkey,$Z3,$T1
126 vaesenc $T2,$inout1,$inout1
127 vpxor $rndkey,$inout5,$inout5
128 setnc %r12b
129 vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
130 vaesenc $T2,$inout2,$inout2
131 vmovdqu 0x10-0x20($Xip),$Hkey # $Hkey^2
132 neg %r12
133 vaesenc $T2,$inout3,$inout3
134 vpxor $Z1,$Z2,$Z2
135 vpclmulqdq \$0x00,$Hkey,$Ii,$Z1
136 vpxor $Z0,$Xi,$Xi # modulo-scheduled
137 vaesenc $T2,$inout4,$inout4
138 vpxor $Z1,$T1,$Z0
139 and \$0x60,%r12
140 vmovups 0x20-0x80($key),$rndkey
141 vpclmulqdq \$0x10,$Hkey,$Ii,$T1
142 vaesenc $T2,$inout5,$inout5
143
144 vpclmulqdq \$0x01,$Hkey,$Ii,$T2
145 lea ($in0,%r12),$in0
146 vaesenc $rndkey,$inout0,$inout0
147 vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled [vpxor $Z3,$Xi,$Xi]
148 vpclmulqdq \$0x11,$Hkey,$Ii,$Hkey
149 vmovdqu 0x40+8(%rsp),$Ii # I[3]
150 vaesenc $rndkey,$inout1,$inout1
151 movbe 0x58($in0),%r13
152 vaesenc $rndkey,$inout2,$inout2
153 movbe 0x50($in0),%r12
154 vaesenc $rndkey,$inout3,$inout3
155 mov %r13,0x20+8(%rsp)
156 vaesenc $rndkey,$inout4,$inout4
157 mov %r12,0x28+8(%rsp)
158 vmovdqu 0x30-0x20($Xip),$Z1 # borrow $Z1 for $Hkey^3
159 vaesenc $rndkey,$inout5,$inout5
160
161 vmovups 0x30-0x80($key),$rndkey
162 vpxor $T1,$Z2,$Z2
163 vpclmulqdq \$0x00,$Z1,$Ii,$T1
164 vaesenc $rndkey,$inout0,$inout0
165 vpxor $T2,$Z2,$Z2
166 vpclmulqdq \$0x10,$Z1,$Ii,$T2
167 vaesenc $rndkey,$inout1,$inout1
168 vpxor $Hkey,$Z3,$Z3
169 vpclmulqdq \$0x01,$Z1,$Ii,$Hkey
170 vaesenc $rndkey,$inout2,$inout2
171 vpclmulqdq \$0x11,$Z1,$Ii,$Z1
172 vmovdqu 0x50+8(%rsp),$Ii # I[2]
173 vaesenc $rndkey,$inout3,$inout3
174 vaesenc $rndkey,$inout4,$inout4
175 vpxor $T1,$Z0,$Z0
176 vmovdqu 0x40-0x20($Xip),$T1 # borrow $T1 for $Hkey^4
177 vaesenc $rndkey,$inout5,$inout5
178
179 vmovups 0x40-0x80($key),$rndkey
180 vpxor $T2,$Z2,$Z2
181 vpclmulqdq \$0x00,$T1,$Ii,$T2
182 vaesenc $rndkey,$inout0,$inout0
183 vpxor $Hkey,$Z2,$Z2
184 vpclmulqdq \$0x10,$T1,$Ii,$Hkey
185 vaesenc $rndkey,$inout1,$inout1
186 movbe 0x48($in0),%r13
187 vpxor $Z1,$Z3,$Z3
188 vpclmulqdq \$0x01,$T1,$Ii,$Z1
189 vaesenc $rndkey,$inout2,$inout2
190 movbe 0x40($in0),%r12
191 vpclmulqdq \$0x11,$T1,$Ii,$T1
192 vmovdqu 0x60+8(%rsp),$Ii # I[1]
193 vaesenc $rndkey,$inout3,$inout3
194 mov %r13,0x30+8(%rsp)
195 vaesenc $rndkey,$inout4,$inout4
196 mov %r12,0x38+8(%rsp)
197 vpxor $T2,$Z0,$Z0
198 vmovdqu 0x60-0x20($Xip),$T2 # borrow $T2 for $Hkey^5
199 vaesenc $rndkey,$inout5,$inout5
200
201 vmovups 0x50-0x80($key),$rndkey
202 vpxor $Hkey,$Z2,$Z2
203 vpclmulqdq \$0x00,$T2,$Ii,$Hkey
204 vaesenc $rndkey,$inout0,$inout0
205 vpxor $Z1,$Z2,$Z2
206 vpclmulqdq \$0x10,$T2,$Ii,$Z1
207 vaesenc $rndkey,$inout1,$inout1
208 movbe 0x38($in0),%r13
209 vpxor $T1,$Z3,$Z3
210 vpclmulqdq \$0x01,$T2,$Ii,$T1
211 vpxor 0x70+8(%rsp),$Xi,$Xi # accumulate I[0]
212 vaesenc $rndkey,$inout2,$inout2
213 movbe 0x30($in0),%r12
214 vpclmulqdq \$0x11,$T2,$Ii,$T2
215 vaesenc $rndkey,$inout3,$inout3
216 mov %r13,0x40+8(%rsp)
217 vaesenc $rndkey,$inout4,$inout4
218 mov %r12,0x48+8(%rsp)
219 vpxor $Hkey,$Z0,$Z0
220 vmovdqu 0x70-0x20($Xip),$Hkey # $Hkey^6
221 vaesenc $rndkey,$inout5,$inout5
222
223 vmovups 0x60-0x80($key),$rndkey
224 vpxor $Z1,$Z2,$Z2
225 vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
226 vaesenc $rndkey,$inout0,$inout0
227 vpxor $T1,$Z2,$Z2
228 vpclmulqdq \$0x01,$Hkey,$Xi,$T1
229 vaesenc $rndkey,$inout1,$inout1
230 movbe 0x28($in0),%r13
231 vpxor $T2,$Z3,$Z3
232 vpclmulqdq \$0x00,$Hkey,$Xi,$T2
233 vaesenc $rndkey,$inout2,$inout2
234 movbe 0x20($in0),%r12
235 vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
236 vaesenc $rndkey,$inout3,$inout3
237 mov %r13,0x50+8(%rsp)
238 vaesenc $rndkey,$inout4,$inout4
239 mov %r12,0x58+8(%rsp)
240 vpxor $Z1,$Z2,$Z2
241 vaesenc $rndkey,$inout5,$inout5
242 vpxor $T1,$Z2,$Z2
243
244 vmovups 0x70-0x80($key),$rndkey
245 vpslldq \$8,$Z2,$Z1
246 vpxor $T2,$Z0,$Z0
247 vmovdqu 0x10($const),$Hkey # .Lpoly
248
249 vaesenc $rndkey,$inout0,$inout0
250 vpxor $Xi,$Z3,$Z3
251 vaesenc $rndkey,$inout1,$inout1
252 vpxor $Z1,$Z0,$Z0
253 movbe 0x18($in0),%r13
254 vaesenc $rndkey,$inout2,$inout2
255 movbe 0x10($in0),%r12
256 vpalignr \$8,$Z0,$Z0,$Ii # 1st phase
257 vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
258 mov %r13,0x60+8(%rsp)
259 vaesenc $rndkey,$inout3,$inout3
260 mov %r12,0x68+8(%rsp)
261 vaesenc $rndkey,$inout4,$inout4
262 vmovups 0x80-0x80($key),$T1 # borrow $T1 for $rndkey
263 vaesenc $rndkey,$inout5,$inout5
264
265 vaesenc $T1,$inout0,$inout0
266 vmovups 0x90-0x80($key),$rndkey
267 vaesenc $T1,$inout1,$inout1
268 vpsrldq \$8,$Z2,$Z2
269 vaesenc $T1,$inout2,$inout2
270 vpxor $Z2,$Z3,$Z3
271 vaesenc $T1,$inout3,$inout3
272 vpxor $Ii,$Z0,$Z0
273 movbe 0x08($in0),%r13
274 vaesenc $T1,$inout4,$inout4
275 movbe 0x00($in0),%r12
276 vaesenc $T1,$inout5,$inout5
277 vmovups 0xa0-0x80($key),$T1
278 cmp \$11,$rounds
279 jb .Lenc_tail # 128-bit key
280
281 vaesenc $rndkey,$inout0,$inout0
282 vaesenc $rndkey,$inout1,$inout1
283 vaesenc $rndkey,$inout2,$inout2
284 vaesenc $rndkey,$inout3,$inout3
285 vaesenc $rndkey,$inout4,$inout4
286 vaesenc $rndkey,$inout5,$inout5
287
288 vaesenc $T1,$inout0,$inout0
289 vaesenc $T1,$inout1,$inout1
290 vaesenc $T1,$inout2,$inout2
291 vaesenc $T1,$inout3,$inout3
292 vaesenc $T1,$inout4,$inout4
293 vmovups 0xb0-0x80($key),$rndkey
294 vaesenc $T1,$inout5,$inout5
295 vmovups 0xc0-0x80($key),$T1
296 je .Lenc_tail # 192-bit key
297
298 vaesenc $rndkey,$inout0,$inout0
299 vaesenc $rndkey,$inout1,$inout1
300 vaesenc $rndkey,$inout2,$inout2
301 vaesenc $rndkey,$inout3,$inout3
302 vaesenc $rndkey,$inout4,$inout4
303 vaesenc $rndkey,$inout5,$inout5
304
305 vaesenc $T1,$inout0,$inout0
306 vaesenc $T1,$inout1,$inout1
307 vaesenc $T1,$inout2,$inout2
308 vaesenc $T1,$inout3,$inout3
309 vaesenc $T1,$inout4,$inout4
310 vmovups 0xd0-0x80($key),$rndkey
311 vaesenc $T1,$inout5,$inout5
312 vmovups 0xe0-0x80($key),$T1
313 jmp .Lenc_tail # 256-bit key
314
315 .align 32
316 .Lhandle_ctr32:
317 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
318 vpshufb $Ii,$T1,$Z2 # byte-swap counter
319 vmovdqu 0x30($const),$Z1 # borrow $Z1, .Ltwo_lsb
320 vpaddd 0x40($const),$Z2,$inout1 # .Lone_lsb
321 vpaddd $Z1,$Z2,$inout2
322 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
323 vpaddd $Z1,$inout1,$inout3
324 vpshufb $Ii,$inout1,$inout1
325 vpaddd $Z1,$inout2,$inout4
326 vpshufb $Ii,$inout2,$inout2
327 vpxor $rndkey,$inout1,$inout1
328 vpaddd $Z1,$inout3,$inout5
329 vpshufb $Ii,$inout3,$inout3
330 vpxor $rndkey,$inout2,$inout2
331 vpaddd $Z1,$inout4,$T1 # byte-swapped next counter value
332 vpshufb $Ii,$inout4,$inout4
333 vpshufb $Ii,$inout5,$inout5
334 vpshufb $Ii,$T1,$T1 # next counter value
335 jmp .Lresume_ctr32
336
337 .align 32
338 .Lenc_tail:
339 vaesenc $rndkey,$inout0,$inout0
340 vmovdqu $Z3,16+8(%rsp) # postpone vpxor $Z3,$Xi,$Xi
341 vpalignr \$8,$Z0,$Z0,$Xi # 2nd phase
342 vaesenc $rndkey,$inout1,$inout1
343 vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
344 vpxor 0x00($inp),$T1,$T2
345 vaesenc $rndkey,$inout2,$inout2
346 vpxor 0x10($inp),$T1,$Ii
347 vaesenc $rndkey,$inout3,$inout3
348 vpxor 0x20($inp),$T1,$Z1
349 vaesenc $rndkey,$inout4,$inout4
350 vpxor 0x30($inp),$T1,$Z2
351 vaesenc $rndkey,$inout5,$inout5
352 vpxor 0x40($inp),$T1,$Z3
353 vpxor 0x50($inp),$T1,$Hkey
354 vmovdqu ($ivp),$T1 # load next counter value
355
356 vaesenclast $T2,$inout0,$inout0
357 vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
358 vaesenclast $Ii,$inout1,$inout1
359 vpaddb $T2,$T1,$Ii
360 mov %r13,0x70+8(%rsp)
361 lea 0x60($inp),$inp
362 vaesenclast $Z1,$inout2,$inout2
363 vpaddb $T2,$Ii,$Z1
364 mov %r12,0x78+8(%rsp)
365 lea 0x60($out),$out
366 vmovdqu 0x00-0x80($key),$rndkey
367 vaesenclast $Z2,$inout3,$inout3
368 vpaddb $T2,$Z1,$Z2
369 vaesenclast $Z3, $inout4,$inout4
370 vpaddb $T2,$Z2,$Z3
371 vaesenclast $Hkey,$inout5,$inout5
372 vpaddb $T2,$Z3,$Hkey
373
374 add \$0x60,$ret
375 sub \$0x6,$len
376 jc .L6x_done
377
378 vmovups $inout0,-0x60($out) # save output
379 vpxor $rndkey,$T1,$inout0
380 vmovups $inout1,-0x50($out)
381 vmovdqa $Ii,$inout1 # 0 latency
382 vmovups $inout2,-0x40($out)
383 vmovdqa $Z1,$inout2 # 0 latency
384 vmovups $inout3,-0x30($out)
385 vmovdqa $Z2,$inout3 # 0 latency
386 vmovups $inout4,-0x20($out)
387 vmovdqa $Z3,$inout4 # 0 latency
388 vmovups $inout5,-0x10($out)
389 vmovdqa $Hkey,$inout5 # 0 latency
390 vmovdqu 0x20+8(%rsp),$Z3 # I[5]
391 jmp .Loop6x
392
393 .L6x_done:
394 vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled
395 vpxor $Z0,$Xi,$Xi # modulo-scheduled
396
397 ret
398 .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
399 ___
400 ######################################################################
401 #
402 # size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
403 # const AES_KEY *key, unsigned char iv[16],
404 # struct { u128 Xi,H,Htbl[9]; } *Xip);
# aesni_gcm_decrypt: rejects len < 0x60 (96 bytes, one 6-block pass)
# returning 0; otherwise saves callee-saved GPRs (and xmm6-15 on
# Win64), aligns the stack, stages the first six byte-swapped
# ciphertext blocks on the stack, and runs _aesni_ctr32_ghash_6x.
# Note the stack is deliberately nudged to avoid cache-set aliasing
# with the key schedule (.Ldec_no_key_aliasing).
405 $code.=<<___;
406 .globl aesni_gcm_decrypt
407 .type aesni_gcm_decrypt,\@function,6
408 .align 32
409 aesni_gcm_decrypt:
410 xor $ret,$ret
411 cmp \$0x60,$len # minimal accepted length
412 jb .Lgcm_dec_abort
413
414 lea (%rsp),%rax # save stack pointer
415 push %rbx
416 push %rbp
417 push %r12
418 push %r13
419 push %r14
420 push %r15
421 ___
422 $code.=<<___ if ($win64);
423 lea -0xa8(%rsp),%rsp
424 movaps %xmm6,-0xd8(%rax)
425 movaps %xmm7,-0xc8(%rax)
426 movaps %xmm8,-0xb8(%rax)
427 movaps %xmm9,-0xa8(%rax)
428 movaps %xmm10,-0x98(%rax)
429 movaps %xmm11,-0x88(%rax)
430 movaps %xmm12,-0x78(%rax)
431 movaps %xmm13,-0x68(%rax)
432 movaps %xmm14,-0x58(%rax)
433 movaps %xmm15,-0x48(%rax)
434 .Lgcm_dec_body:
435 ___
436 $code.=<<___;
437 vzeroupper
438
439 vmovdqu ($ivp),$T1 # input counter value
440 add \$-128,%rsp
441 mov 12($ivp),$counter
442 lea .Lbswap_mask(%rip),$const
443 lea -0x80($key),$in0 # borrow $in0
444 mov \$0xf80,$end0 # borrow $end0
445 vmovdqu ($Xip),$Xi # load Xi
446 and \$-128,%rsp # ensure stack alignment
447 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
448 lea 0x80($key),$key # size optimization
449 lea 0x20+0x20($Xip),$Xip # size optimization
450 mov 0xf0-0x80($key),$rounds
451 vpshufb $Ii,$Xi,$Xi
452
453 and $end0,$in0
454 and %rsp,$end0
455 sub $in0,$end0
456 jc .Ldec_no_key_aliasing
457 cmp \$768,$end0
458 jnc .Ldec_no_key_aliasing
459 sub $end0,%rsp # avoid aliasing with key
460 .Ldec_no_key_aliasing:
461
462 vmovdqu 0x50($inp),$Z3 # I[5]
463 lea ($inp),$in0
464 vmovdqu 0x40($inp),$Z0
465 lea -0xc0($inp,$len),$end0
466 vmovdqu 0x30($inp),$Z1
467 shr \$4,$len
468 xor $ret,$ret
469 vmovdqu 0x20($inp),$Z2
470 vpshufb $Ii,$Z3,$Z3 # passed to _aesni_ctr32_ghash_6x
471 vmovdqu 0x10($inp),$T2
472 vpshufb $Ii,$Z0,$Z0
473 vmovdqu ($inp),$Hkey
474 vpshufb $Ii,$Z1,$Z1
475 vmovdqu $Z0,0x30(%rsp)
476 vpshufb $Ii,$Z2,$Z2
477 vmovdqu $Z1,0x40(%rsp)
478 vpshufb $Ii,$T2,$T2
479 vmovdqu $Z2,0x50(%rsp)
480 vpshufb $Ii,$Hkey,$Hkey
481 vmovdqu $T2,0x60(%rsp)
482 vmovdqu $Hkey,0x70(%rsp)
483
484 call _aesni_ctr32_ghash_6x
485
486 vmovups $inout0,-0x60($out) # save output
487 vmovups $inout1,-0x50($out)
488 vmovups $inout2,-0x40($out)
489 vmovups $inout3,-0x30($out)
490 vmovups $inout4,-0x20($out)
491 vmovups $inout5,-0x10($out)
492
493 vpshufb ($const),$Xi,$Xi # .Lbswap_mask
494 vmovdqu $Xi,-0x40($Xip) # output Xi
495
496 vzeroupper
497 ___
498 $code.=<<___ if ($win64);
499 movaps -0xd8(%rax),%xmm6
500 movaps -0xc8(%rax),%xmm7
501 movaps -0xb8(%rax),%xmm8
502 movaps -0xa8(%rax),%xmm9
503 movaps -0x98(%rax),%xmm10
504 movaps -0x88(%rax),%xmm11
505 movaps -0x78(%rax),%xmm12
506 movaps -0x68(%rax),%xmm13
507 movaps -0x58(%rax),%xmm14
508 movaps -0x48(%rax),%xmm15
509 ___
510 $code.=<<___;
511 mov -48(%rax),%r15
512 mov -40(%rax),%r14
513 mov -32(%rax),%r13
514 mov -24(%rax),%r12
515 mov -16(%rax),%rbp
516 mov -8(%rax),%rbx
517 lea (%rax),%rsp # restore %rsp
518 .Lgcm_dec_abort:
519 mov $ret,%rax # return value
520 ret
521 .size aesni_gcm_decrypt,.-aesni_gcm_decrypt
522 ___
523
# _aesni_ctr32_6x: plain (non-stitched) 6-block AES-CTR used by the
# encrypt path to produce ciphertext that the stitched loop has not
# yet hashed; handles 32-bit counter wrap via .Lhandle_ctr32_2.
# Clobbers %r12/%r13. Followed by aesni_gcm_encrypt, which requires
# len >= 0x60*3 (two warm-up _aesni_ctr32_6x passes plus at least one
# stitched pass) and mirrors the decrypt prologue/aliasing logic.
524 $code.=<<___;
525 .type _aesni_ctr32_6x,\@abi-omnipotent
526 .align 32
527 _aesni_ctr32_6x:
528 vmovdqu 0x00-0x80($key),$Z0 # borrow $Z0 for $rndkey
529 vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
530 lea -1($rounds),%r13
531 vmovups 0x10-0x80($key),$rndkey
532 lea 0x20-0x80($key),%r12
533 vpxor $Z0,$T1,$inout0
534 add \$`6<<24`,$counter
535 jc .Lhandle_ctr32_2
536 vpaddb $T2,$T1,$inout1
537 vpaddb $T2,$inout1,$inout2
538 vpxor $Z0,$inout1,$inout1
539 vpaddb $T2,$inout2,$inout3
540 vpxor $Z0,$inout2,$inout2
541 vpaddb $T2,$inout3,$inout4
542 vpxor $Z0,$inout3,$inout3
543 vpaddb $T2,$inout4,$inout5
544 vpxor $Z0,$inout4,$inout4
545 vpaddb $T2,$inout5,$T1
546 vpxor $Z0,$inout5,$inout5
547 jmp .Loop_ctr32
548
549 .align 16
550 .Loop_ctr32:
551 vaesenc $rndkey,$inout0,$inout0
552 vaesenc $rndkey,$inout1,$inout1
553 vaesenc $rndkey,$inout2,$inout2
554 vaesenc $rndkey,$inout3,$inout3
555 vaesenc $rndkey,$inout4,$inout4
556 vaesenc $rndkey,$inout5,$inout5
557 vmovups (%r12),$rndkey
558 lea 0x10(%r12),%r12
559 dec %r13d
560 jnz .Loop_ctr32
561
562 vmovdqu (%r12),$Hkey # last round key
563 vaesenc $rndkey,$inout0,$inout0
564 vpxor 0x00($inp),$Hkey,$Z0
565 vaesenc $rndkey,$inout1,$inout1
566 vpxor 0x10($inp),$Hkey,$Z1
567 vaesenc $rndkey,$inout2,$inout2
568 vpxor 0x20($inp),$Hkey,$Z2
569 vaesenc $rndkey,$inout3,$inout3
570 vpxor 0x30($inp),$Hkey,$Xi
571 vaesenc $rndkey,$inout4,$inout4
572 vpxor 0x40($inp),$Hkey,$T2
573 vaesenc $rndkey,$inout5,$inout5
574 vpxor 0x50($inp),$Hkey,$Hkey
575 lea 0x60($inp),$inp
576
577 vaesenclast $Z0,$inout0,$inout0
578 vaesenclast $Z1,$inout1,$inout1
579 vaesenclast $Z2,$inout2,$inout2
580 vaesenclast $Xi,$inout3,$inout3
581 vaesenclast $T2,$inout4,$inout4
582 vaesenclast $Hkey,$inout5,$inout5
583 vmovups $inout0,0x00($out)
584 vmovups $inout1,0x10($out)
585 vmovups $inout2,0x20($out)
586 vmovups $inout3,0x30($out)
587 vmovups $inout4,0x40($out)
588 vmovups $inout5,0x50($out)
589 lea 0x60($out),$out
590
591 ret
592 .align 32
593 .Lhandle_ctr32_2:
594 vpshufb $Ii,$T1,$Z2 # byte-swap counter
595 vmovdqu 0x30($const),$Z1 # borrow $Z1, .Ltwo_lsb
596 vpaddd 0x40($const),$Z2,$inout1 # .Lone_lsb
597 vpaddd $Z1,$Z2,$inout2
598 vpaddd $Z1,$inout1,$inout3
599 vpshufb $Ii,$inout1,$inout1
600 vpaddd $Z1,$inout2,$inout4
601 vpshufb $Ii,$inout2,$inout2
602 vpxor $Z0,$inout1,$inout1
603 vpaddd $Z1,$inout3,$inout5
604 vpshufb $Ii,$inout3,$inout3
605 vpxor $Z0,$inout2,$inout2
606 vpaddd $Z1,$inout4,$T1 # byte-swapped next counter value
607 vpshufb $Ii,$inout4,$inout4
608 vpxor $Z0,$inout3,$inout3
609 vpshufb $Ii,$inout5,$inout5
610 vpxor $Z0,$inout4,$inout4
611 vpshufb $Ii,$T1,$T1 # next counter value
612 vpxor $Z0,$inout5,$inout5
613 jmp .Loop_ctr32
614 .size _aesni_ctr32_6x,.-_aesni_ctr32_6x
615
616 .globl aesni_gcm_encrypt
617 .type aesni_gcm_encrypt,\@function,6
618 .align 32
619 aesni_gcm_encrypt:
620 xor $ret,$ret
621 cmp \$0x60*3,$len # minimal accepted length
622 jb .Lgcm_enc_abort
623
624 lea (%rsp),%rax # save stack pointer
625 push %rbx
626 push %rbp
627 push %r12
628 push %r13
629 push %r14
630 push %r15
631 ___
632 $code.=<<___ if ($win64);
633 lea -0xa8(%rsp),%rsp
634 movaps %xmm6,-0xd8(%rax)
635 movaps %xmm7,-0xc8(%rax)
636 movaps %xmm8,-0xb8(%rax)
637 movaps %xmm9,-0xa8(%rax)
638 movaps %xmm10,-0x98(%rax)
639 movaps %xmm11,-0x88(%rax)
640 movaps %xmm12,-0x78(%rax)
641 movaps %xmm13,-0x68(%rax)
642 movaps %xmm14,-0x58(%rax)
643 movaps %xmm15,-0x48(%rax)
644 .Lgcm_enc_body:
645 ___
646 $code.=<<___;
647 vzeroupper
648
649 vmovdqu ($ivp),$T1 # input counter value
650 add \$-128,%rsp
651 mov 12($ivp),$counter
652 lea .Lbswap_mask(%rip),$const
653 lea -0x80($key),$in0 # borrow $in0
654 mov \$0xf80,$end0 # borrow $end0
655 lea 0x80($key),$key # size optimization
656 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
657 and \$-128,%rsp # ensure stack alignment
658 mov 0xf0-0x80($key),$rounds
659
660 and $end0,$in0
661 and %rsp,$end0
662 sub $in0,$end0
663 jc .Lenc_no_key_aliasing
664 cmp \$768,$end0
665 jnc .Lenc_no_key_aliasing
666 sub $end0,%rsp # avoid aliasing with key
667 .Lenc_no_key_aliasing:
668
669 lea ($out),$in0
670 lea -0xc0($out,$len),$end0
671 shr \$4,$len
672
673 call _aesni_ctr32_6x
674 vpshufb $Ii,$inout0,$Xi # save bswapped output on stack
675 vpshufb $Ii,$inout1,$T2
676 vmovdqu $Xi,0x70(%rsp)
677 vpshufb $Ii,$inout2,$Z0
678 vmovdqu $T2,0x60(%rsp)
679 vpshufb $Ii,$inout3,$Z1
680 vmovdqu $Z0,0x50(%rsp)
681 vpshufb $Ii,$inout4,$Z2
682 vmovdqu $Z1,0x40(%rsp)
683 vpshufb $Ii,$inout5,$Z3 # passed to _aesni_ctr32_ghash_6x
684 vmovdqu $Z2,0x30(%rsp)
685
686 call _aesni_ctr32_6x
687
688 vmovdqu ($Xip),$Xi # load Xi
689 lea 0x20+0x20($Xip),$Xip # size optimization
690 sub \$12,$len
691 mov \$0x60*2,$ret
692 vpshufb $Ii,$Xi,$Xi
693
694 call _aesni_ctr32_ghash_6x
695 vmovdqu 0x20(%rsp),$Z3 # I[5]
696 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
697 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
698 vpunpckhqdq $Z3,$Z3,$T1
699 vmovdqu 0x20-0x20($Xip),$rndkey # borrow $rndkey for $HK
700 vmovups $inout0,-0x60($out) # save output
701 vpshufb $Ii,$inout0,$inout0 # but keep bswapped copy
702 vpxor $Z3,$T1,$T1
703 vmovups $inout1,-0x50($out)
704 vpshufb $Ii,$inout1,$inout1
705 vmovups $inout2,-0x40($out)
706 vpshufb $Ii,$inout2,$inout2
707 vmovups $inout3,-0x30($out)
708 vpshufb $Ii,$inout3,$inout3
709 vmovups $inout4,-0x20($out)
710 vpshufb $Ii,$inout4,$inout4
711 vmovups $inout5,-0x10($out)
712 vpshufb $Ii,$inout5,$inout5
713 vmovdqu $inout0,0x10(%rsp) # free $inout0
714 ___
# Final GHASH of the last 12 ciphertext blocks left unhashed by the
# encrypt flow (6 staged on the stack, 6 still in $inout0-5). Uses an
# aggregated Karatsuba schedule over $Hkey^1..^6 followed by two
# reduction phases against .Lpoly. $HK/$T3 locally alias $rndkey and
# $inout0, which are free at this point.
715 { my ($HK,$T3)=($rndkey,$inout0);
716
717 $code.=<<___;
718 vmovdqu 0x30(%rsp),$Z2 # I[4]
719 vmovdqu 0x10-0x20($Xip),$Ii # borrow $Ii for $Hkey^2
720 vpunpckhqdq $Z2,$Z2,$T2
721 vpclmulqdq \$0x00,$Hkey,$Z3,$Z1
722 vpxor $Z2,$T2,$T2
723 vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
724 vpclmulqdq \$0x00,$HK,$T1,$T1
725
726 vmovdqu 0x40(%rsp),$T3 # I[3]
727 vpclmulqdq \$0x00,$Ii,$Z2,$Z0
728 vmovdqu 0x30-0x20($Xip),$Hkey # $Hkey^3
729 vpxor $Z1,$Z0,$Z0
730 vpunpckhqdq $T3,$T3,$Z1
731 vpclmulqdq \$0x11,$Ii,$Z2,$Z2
732 vpxor $T3,$Z1,$Z1
733 vpxor $Z3,$Z2,$Z2
734 vpclmulqdq \$0x10,$HK,$T2,$T2
735 vmovdqu 0x50-0x20($Xip),$HK
736 vpxor $T1,$T2,$T2
737
738 vmovdqu 0x50(%rsp),$T1 # I[2]
739 vpclmulqdq \$0x00,$Hkey,$T3,$Z3
740 vmovdqu 0x40-0x20($Xip),$Ii # borrow $Ii for $Hkey^4
741 vpxor $Z0,$Z3,$Z3
742 vpunpckhqdq $T1,$T1,$Z0
743 vpclmulqdq \$0x11,$Hkey,$T3,$T3
744 vpxor $T1,$Z0,$Z0
745 vpxor $Z2,$T3,$T3
746 vpclmulqdq \$0x00,$HK,$Z1,$Z1
747 vpxor $T2,$Z1,$Z1
748
749 vmovdqu 0x60(%rsp),$T2 # I[1]
750 vpclmulqdq \$0x00,$Ii,$T1,$Z2
751 vmovdqu 0x60-0x20($Xip),$Hkey # $Hkey^5
752 vpxor $Z3,$Z2,$Z2
753 vpunpckhqdq $T2,$T2,$Z3
754 vpclmulqdq \$0x11,$Ii,$T1,$T1
755 vpxor $T2,$Z3,$Z3
756 vpxor $T3,$T1,$T1
757 vpclmulqdq \$0x10,$HK,$Z0,$Z0
758 vmovdqu 0x80-0x20($Xip),$HK
759 vpxor $Z1,$Z0,$Z0
760
761 vpxor 0x70(%rsp),$Xi,$Xi # accumulate I[0]
762 vpclmulqdq \$0x00,$Hkey,$T2,$Z1
763 vmovdqu 0x70-0x20($Xip),$Ii # borrow $Ii for $Hkey^6
764 vpunpckhqdq $Xi,$Xi,$T3
765 vpxor $Z2,$Z1,$Z1
766 vpclmulqdq \$0x11,$Hkey,$T2,$T2
767 vpxor $Xi,$T3,$T3
768 vpxor $T1,$T2,$T2
769 vpclmulqdq \$0x00,$HK,$Z3,$Z3
770 vpxor $Z0,$Z3,$Z0
771
772 vpclmulqdq \$0x00,$Ii,$Xi,$Z2
773 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
774 vpunpckhqdq $inout5,$inout5,$T1
775 vpclmulqdq \$0x11,$Ii,$Xi,$Xi
776 vpxor $inout5,$T1,$T1
777 vpxor $Z1,$Z2,$Z1
778 vpclmulqdq \$0x10,$HK,$T3,$T3
779 vmovdqu 0x20-0x20($Xip),$HK
780 vpxor $T2,$Xi,$Z3
781 vpxor $Z0,$T3,$Z2
782
783 vmovdqu 0x10-0x20($Xip),$Ii # borrow $Ii for $Hkey^2
784 vpxor $Z1,$Z3,$T3 # aggregated Karatsuba post-processing
785 vpclmulqdq \$0x00,$Hkey,$inout5,$Z0
786 vpxor $T3,$Z2,$Z2
787 vpunpckhqdq $inout4,$inout4,$T2
788 vpclmulqdq \$0x11,$Hkey,$inout5,$inout5
789 vpxor $inout4,$T2,$T2
790 vpslldq \$8,$Z2,$T3
791 vpclmulqdq \$0x00,$HK,$T1,$T1
792 vpxor $T3,$Z1,$Xi
793 vpsrldq \$8,$Z2,$Z2
794 vpxor $Z2,$Z3,$Z3
795
796 vpclmulqdq \$0x00,$Ii,$inout4,$Z1
797 vmovdqu 0x30-0x20($Xip),$Hkey # $Hkey^3
798 vpxor $Z0,$Z1,$Z1
799 vpunpckhqdq $inout3,$inout3,$T3
800 vpclmulqdq \$0x11,$Ii,$inout4,$inout4
801 vpxor $inout3,$T3,$T3
802 vpxor $inout5,$inout4,$inout4
803 vpalignr \$8,$Xi,$Xi,$inout5 # 1st phase
804 vpclmulqdq \$0x10,$HK,$T2,$T2
805 vmovdqu 0x50-0x20($Xip),$HK
806 vpxor $T1,$T2,$T2
807
808 vpclmulqdq \$0x00,$Hkey,$inout3,$Z0
809 vmovdqu 0x40-0x20($Xip),$Ii # borrow $Ii for $Hkey^4
810 vpxor $Z1,$Z0,$Z0
811 vpunpckhqdq $inout2,$inout2,$T1
812 vpclmulqdq \$0x11,$Hkey,$inout3,$inout3
813 vpxor $inout2,$T1,$T1
814 vpxor $inout4,$inout3,$inout3
815 vxorps 0x10(%rsp),$Z3,$Z3 # accumulate $inout0
816 vpclmulqdq \$0x00,$HK,$T3,$T3
817 vpxor $T2,$T3,$T3
818
819 vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
820 vxorps $inout5,$Xi,$Xi
821
822 vpclmulqdq \$0x00,$Ii,$inout2,$Z1
823 vmovdqu 0x60-0x20($Xip),$Hkey # $Hkey^5
824 vpxor $Z0,$Z1,$Z1
825 vpunpckhqdq $inout1,$inout1,$T2
826 vpclmulqdq \$0x11,$Ii,$inout2,$inout2
827 vpxor $inout1,$T2,$T2
828 vpalignr \$8,$Xi,$Xi,$inout5 # 2nd phase
829 vpxor $inout3,$inout2,$inout2
830 vpclmulqdq \$0x10,$HK,$T1,$T1
831 vmovdqu 0x80-0x20($Xip),$HK
832 vpxor $T3,$T1,$T1
833
834 vxorps $Z3,$inout5,$inout5
835 vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
836 vxorps $inout5,$Xi,$Xi
837
838 vpclmulqdq \$0x00,$Hkey,$inout1,$Z0
839 vmovdqu 0x70-0x20($Xip),$Ii # borrow $Ii for $Hkey^6
840 vpxor $Z1,$Z0,$Z0
841 vpunpckhqdq $Xi,$Xi,$T3
842 vpclmulqdq \$0x11,$Hkey,$inout1,$inout1
843 vpxor $Xi,$T3,$T3
844 vpxor $inout2,$inout1,$inout1
845 vpclmulqdq \$0x00,$HK,$T2,$T2
846 vpxor $T1,$T2,$T2
847
848 vpclmulqdq \$0x00,$Ii,$Xi,$Z1
849 vpclmulqdq \$0x11,$Ii,$Xi,$Z3
850 vpxor $Z0,$Z1,$Z1
851 vpclmulqdq \$0x10,$HK,$T3,$Z2
852 vpxor $inout1,$Z3,$Z3
853 vpxor $T2,$Z2,$Z2
854
855 vpxor $Z1,$Z3,$Z0 # aggregated Karatsuba post-processing
856 vpxor $Z0,$Z2,$Z2
857 vpslldq \$8,$Z2,$T1
858 vmovdqu 0x10($const),$Hkey # .Lpoly
859 vpsrldq \$8,$Z2,$Z2
860 vpxor $T1,$Z1,$Xi
861 vpxor $Z2,$Z3,$Z3
862
863 vpalignr \$8,$Xi,$Xi,$T2 # 1st phase
864 vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
865 vpxor $T2,$Xi,$Xi
866
867 vpalignr \$8,$Xi,$Xi,$T2 # 2nd phase
868 vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
869 vpxor $Z3,$T2,$T2
870 vpxor $T2,$Xi,$Xi
871 ___
872 }
# Encrypt epilogue: byte-swap the final hash back to memory order,
# store Xi, restore Win64 xmm registers and callee-saved GPRs, and
# return the number of bytes processed in %rax.
873 $code.=<<___;
874 vpshufb ($const),$Xi,$Xi # .Lbswap_mask
875 vmovdqu $Xi,-0x40($Xip) # output Xi
876
877 vzeroupper
878 ___
879 $code.=<<___ if ($win64);
880 movaps -0xd8(%rax),%xmm6
881 movaps -0xc8(%rax),%xmm7
882 movaps -0xb8(%rax),%xmm8
883 movaps -0xa8(%rax),%xmm9
884 movaps -0x98(%rax),%xmm10
885 movaps -0x88(%rax),%xmm11
886 movaps -0x78(%rax),%xmm12
887 movaps -0x68(%rax),%xmm13
888 movaps -0x58(%rax),%xmm14
889 movaps -0x48(%rax),%xmm15
890 ___
891 $code.=<<___;
892 mov -48(%rax),%r15
893 mov -40(%rax),%r14
894 mov -32(%rax),%r13
895 mov -24(%rax),%r12
896 mov -16(%rax),%rbp
897 mov -8(%rax),%rbx
898 lea (%rax),%rsp # restore %rsp
899 .Lgcm_enc_abort:
900 mov $ret,%rax # return value
901 ret
902 .size aesni_gcm_encrypt,.-aesni_gcm_encrypt
903 ___
904
# Constant pool, addressed as fixed offsets from $const:
# +0x00 bswap mask, +0x10 GHASH reduction polynomial, +0x20 big-endian
# increment-by-one, +0x30/+0x40 little-endian two/one (dword adds used
# on counter wrap).
905 $code.=<<___;
906 .align 64
907 .Lbswap_mask:
908 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
909 .Lpoly:
910 .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
911 .Lone_msb:
912 .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
913 .Ltwo_lsb:
914 .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
915 .Lone_lsb:
916 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
917 .asciz "AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
918 .align 64
919 ___
# Win64 structured-exception-handling support: gcm_se_handler restores
# the saved GPR/xmm state if an exception unwinds through either entry
# point, and the .pdata/.xdata tables register it for both functions.
# (.LSEH_begin_/_end_ labels are emitted by x86_64-xlate.pl for
# \@function-typed symbols.)
920 if ($win64) {
921 $rec="%rcx";
922 $frame="%rdx";
923 $context="%r8";
924 $disp="%r9";
925
926 $code.=<<___
927 .extern __imp_RtlVirtualUnwind
928 .type gcm_se_handler,\@abi-omnipotent
929 .align 16
930 gcm_se_handler:
931 push %rsi
932 push %rdi
933 push %rbx
934 push %rbp
935 push %r12
936 push %r13
937 push %r14
938 push %r15
939 pushfq
940 sub \$64,%rsp
941
942 mov 120($context),%rax # pull context->Rax
943 mov 248($context),%rbx # pull context->Rip
944
945 mov 8($disp),%rsi # disp->ImageBase
946 mov 56($disp),%r11 # disp->HandlerData
947
948 mov 0(%r11),%r10d # HandlerData[0]
949 lea (%rsi,%r10),%r10 # prologue label
950 cmp %r10,%rbx # context->Rip<prologue label
951 jb .Lcommon_seh_tail
952
953 mov 152($context),%rax # pull context->Rsp
954
955 mov 4(%r11),%r10d # HandlerData[1]
956 lea (%rsi,%r10),%r10 # epilogue label
957 cmp %r10,%rbx # context->Rip>=epilogue label
958 jae .Lcommon_seh_tail
959
960 mov 120($context),%rax # pull context->Rax
961
962 mov -48(%rax),%r15
963 mov -40(%rax),%r14
964 mov -32(%rax),%r13
965 mov -24(%rax),%r12
966 mov -16(%rax),%rbp
967 mov -8(%rax),%rbx
968 mov %r15,240($context)
969 mov %r14,232($context)
970 mov %r13,224($context)
971 mov %r12,216($context)
972 mov %rbp,160($context)
973 mov %rbx,144($context)
974
975 lea -0xd8(%rax),%rsi # %xmm save area
976 lea 512($context),%rdi # & context.Xmm6
977 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
978 .long 0xa548f3fc # cld; rep movsq
979
980 .Lcommon_seh_tail:
981 mov 8(%rax),%rdi
982 mov 16(%rax),%rsi
983 mov %rax,152($context) # restore context->Rsp
984 mov %rsi,168($context) # restore context->Rsi
985 mov %rdi,176($context) # restore context->Rdi
986
987 mov 40($disp),%rdi # disp->ContextRecord
988 mov $context,%rsi # context
989 mov \$154,%ecx # sizeof(CONTEXT)
990 .long 0xa548f3fc # cld; rep movsq
991
992 mov $disp,%rsi
993 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
994 mov 8(%rsi),%rdx # arg2, disp->ImageBase
995 mov 0(%rsi),%r8 # arg3, disp->ControlPc
996 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
997 mov 40(%rsi),%r10 # disp->ContextRecord
998 lea 56(%rsi),%r11 # &disp->HandlerData
999 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1000 mov %r10,32(%rsp) # arg5
1001 mov %r11,40(%rsp) # arg6
1002 mov %r12,48(%rsp) # arg7
1003 mov %rcx,56(%rsp) # arg8, (NULL)
1004 call *__imp_RtlVirtualUnwind(%rip)
1005
1006 mov \$1,%eax # ExceptionContinueSearch
1007 add \$64,%rsp
1008 popfq
1009 pop %r15
1010 pop %r14
1011 pop %r13
1012 pop %r12
1013 pop %rbp
1014 pop %rbx
1015 pop %rdi
1016 pop %rsi
1017 ret
1018 .size gcm_se_handler,.-gcm_se_handler
1019
1020 .section .pdata
1021 .align 4
1022 .rva .LSEH_begin_aesni_gcm_decrypt
1023 .rva .LSEH_end_aesni_gcm_decrypt
1024 .rva .LSEH_gcm_dec_info
1025
1026 .rva .LSEH_begin_aesni_gcm_encrypt
1027 .rva .LSEH_end_aesni_gcm_encrypt
1028 .rva .LSEH_gcm_enc_info
1029 .section .xdata
1030 .align 8
1031 .LSEH_gcm_dec_info:
1032 .byte 9,0,0,0
1033 .rva gcm_se_handler
1034 .rva .Lgcm_dec_body,.Lgcm_dec_abort
1035 .LSEH_gcm_enc_info:
1036 .byte 9,0,0,0
1037 .rva gcm_se_handler
1038 .rva .Lgcm_enc_body,.Lgcm_enc_abort
1039 ___
1040 }
# Fallback for assemblers without adequate AVX support ($avx<=1):
# emit do-nothing stubs that return 0 so the caller (gcm128.c) falls
# back to the generic CTR+GHASH code paths.
1041 }}} else {{{
1042 $code=<<___; # assembler is too old
1043 .text
1044
1045 .globl aesni_gcm_encrypt
1046 .type aesni_gcm_encrypt,\@abi-omnipotent
1047 aesni_gcm_encrypt:
1048 xor %eax,%eax
1049 ret
1050 .size aesni_gcm_encrypt,.-aesni_gcm_encrypt
1051
1052 .globl aesni_gcm_decrypt
1053 .type aesni_gcm_decrypt,\@abi-omnipotent
1054 aesni_gcm_decrypt:
1055 xor %eax,%eax
1056 ret
1057 .size aesni_gcm_decrypt,.-aesni_gcm_decrypt
1058 ___
1059 }}}
1060
# Expand `...` constructs (compile-time arithmetic such as `6<<24`)
# before handing the code to the translator.
1061 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
1062
1063 print $code;
1064
# Detect write failures on the pipe to x86_64-xlate.pl (e.g. the
# translator dying mid-stream); an unchecked close would silently
# leave a truncated assembly file behind.
1065 close STDOUT or die "error closing STDOUT: $!";