1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # This module implements support for Intel AES-NI extension. In
11 # OpenSSL context it's used with Intel engine, but can also be used as
12 # drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below for
13 # details].
14 #
15 # Performance.
16 #
17 # Given the aes(enc|dec) instructions' latency, asymptotic performance for
18 # non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
19 # processed with a 128-bit key. And given their throughput, asymptotic
20 # performance for parallelizable modes is 1.25 cycles per byte. Being an
21 # asymptotic limit it's not something you commonly achieve in reality,
22 # but how close does one get? Below are results collected for
23 # different modes and block sizes. Pairs of numbers are for en-/
24 # decryption.
25 #
26 # 16-byte 64-byte 256-byte 1-KB 8-KB
27 # ECB 4.25/4.25 1.38/1.38 1.28/1.28 1.26/1.26 1.26/1.26
28 # CTR 5.42/5.42 1.92/1.92 1.44/1.44 1.28/1.28 1.26/1.26
29 # CBC 4.38/4.43 4.15/1.43 4.07/1.32 4.07/1.29 4.06/1.28
30 # CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07
31 # OFB 5.42/5.42 4.64/4.64 4.44/4.44 4.39/4.39 4.38/4.38
32 # CFB 5.73/5.85 5.56/5.62 5.48/5.56 5.47/5.55 5.47/5.55
33 #
34 # ECB, CTR, CBC and CCM results are free from EVP overhead. This means
35 # that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
36 # [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
37 # The results were collected with specially crafted speed.c benchmark
38 # in order to compare them with results reported in "Intel Advanced
39 # Encryption Standard (AES) New Instruction Set" White Paper Revision
40 # 3.0 dated May 2010. All above results are consistently better. This
41 # module also provides better performance for block sizes smaller than
42 # 128 bytes in points *not* represented in the above table.
43 #
44 # Looking at the results for 8-KB buffer.
45 #
46 # CFB and OFB results are far from the limit, because implementation
47 # uses "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
48 # single-block aesni_encrypt, which is not the most optimal way to go.
49 # CBC encrypt result is unexpectedly high and there is no documented
50 # explanation for it. Seemingly there is a small penalty for feeding
51 # the result back to AES unit the way it's done in CBC mode. There is
52 # nothing one can do and the result appears optimal. CCM result is
53 # identical to CBC, because CBC-MAC is essentially CBC encrypt without
54 # saving output. CCM CTR "stays invisible," because it's neatly
55 # interleaved with CBC-MAC. This provides ~30% improvement over a
56 # "straightforward" CCM implementation with CTR and CBC-MAC performed
57 # disjointly. Parallelizable modes practically achieve the theoretical
58 # limit.
59 #
60 # Looking at how results vary with buffer size.
61 #
62 # Curves are practically saturated at 1-KB buffer size. In most cases
63 # "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
64 # CTR curve doesn't follow this pattern and is the "slowest"-changing one
65 # with "256-byte" result being 87% of "8-KB." This is because overhead
66 # in CTR mode is most computationally intensive. Small-block CCM
67 # decrypt is slower than encrypt, because first CTR and last CBC-MAC
68 # iterations can't be interleaved.
69 #
70 # Results for 192- and 256-bit keys.
71 #
72 # EVP-free results were observed to scale perfectly with number of
73 # rounds for larger block sizes, i.e. 192-bit result being 10/12 times
74 # lower and 256-bit one - 10/14. Well, in CBC encrypt case differences
75 # are a tad smaller, because the above mentioned penalty biases all
76 # results by the same constant value. In a similar way function call
77 # overhead affects small-block performance, as well as OFB and CFB
78 # results. Differences are not large, most common coefficients are
79 # 10/11.7 and 10/13.4 (as opposed to 10/12.0 and 10/14.0), but one can
80 # observe even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
81
82 # January 2011
83 #
84 # While Westmere processor features 6 cycles latency for aes[enc|dec]
85 # instructions, which can be scheduled every second cycle, Sandy
86 # Bridge spends 8 cycles per instruction, but it can schedule them
87 # every cycle. This means that code targeting Westmere would perform
88 # suboptimally on Sandy Bridge. Therefore this update.
89 #
90 # In addition, non-parallelizable CBC encrypt (as well as CCM) is
91 # optimized. Relative improvement might appear modest, 8% on Westmere,
92 # but in absolute terms it's 3.77 cycles per byte encrypted with
93 # 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
94 # should be compared to asymptotic limits of 3.75 for Westmere and
95 # 5.00 for Sandy Bridge. Actually, the fact that they get this close
96 # to asymptotic limits is quite amazing. Indeed, the limit is
97 # calculated as latency times number of rounds, 10 for 128-bit key,
98 # and divided by 16, the number of bytes in block, or in other words
99 # it accounts *solely* for aesenc instructions. But there are extra
100 # instructions, and numbers so close to the asymptotic limits mean
101 # that it's as if it takes as little as *one* additional cycle to
102 # execute all of them. How is it possible? It is possible thanks to
103 # out-of-order execution logic, which manages to overlap post-
104 # processing of previous block, things like saving the output, with
105 # actual encryption of current block, as well as pre-processing of
106 # current block, things like fetching input and xor-ing it with
107 # 0-round element of the key schedule, with actual encryption of
108 # previous block. Keep this in mind...
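#
# For a quick sanity check of those limits (a sketch using only figures
# quoted above): limit = aes[enc|dec] latency * rounds / 16 bytes, i.e.
# 6*10/16 = 3.75 cycles per byte on Westmere and 8*10/16 = 5.00 on
# Sandy Bridge -- exactly the asymptotes mentioned in this paragraph.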
109 #
110 # For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
111 # performance is achieved by interleaving instructions working on
112 # independent blocks. In which case asymptotic limit for such modes
113 # can be obtained by dividing above mentioned numbers by AES
114 # instructions' interleave factor. Westmere can execute at most 3
115 # instructions at a time, meaning that optimal interleave factor is 3,
116 # and that's where the "magic" number of 1.25 comes from. "Optimal
117 # interleave factor" means that increase of interleave factor does
118 # not improve performance. The formula has proven to reflect reality
119 # pretty well on Westmere... Sandy Bridge on the other hand can
120 # execute up to 8 AES instructions at a time, so how does varying
121 # interleave factor affect the performance? Here is table for ECB
122 # (numbers are cycles per byte processed with 128-bit key):
123 #
124 # instruction interleave factor 3x 6x 8x
125 # theoretical asymptotic limit 1.67 0.83 0.625
126 # measured performance for 8KB block 1.05 0.86 0.84
127 #
128 # "as if" interleave factor 4.7x 5.8x 6.0x
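#
# (The first two rows follow from the same formula as before: the serial
# limit on Sandy Bridge is 8*10/16 = 5.0 cycles per byte, and dividing it
# by the interleave factor gives 1.67, 0.83 and 0.625. The "as if" row is
# the serial limit divided by the measured result, which is where the
# 4.7x, 5.8x and 6.0x figures come from -- a sketch, using only numbers
# already quoted above.)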
129 #
130 # Further data for other parallelizable modes:
131 #
132 # CBC decrypt 1.16 0.93 0.74
133 # CTR 1.14 0.91 0.74
134 #
135 # Well, given 3x column it's probably inappropriate to call the limit
136 # asymptotic, if it can be surpassed, isn't it? What happens there?
137 # Rewind to CBC paragraph for the answer. Yes, out-of-order execution
138 # magic is responsible for this. Processor overlaps not only the
139 # additional instructions with AES ones, but even AES instructions
140 # processing adjacent triplets of independent blocks. In the 6x case
141 # additional instructions still claim a disproportionately small amount
142 # of additional cycles, but in the 8x case the number of instructions must
143 # be a tad too high for the out-of-order logic to cope with, and the AES
144 # unit remains underutilized... As you can see 8x interleave is hardly
145 # justifiable, so there is no need to feel bad that 32-bit aesni-x86.pl
146 # utilizes 6x interleave because of limited register bank capacity.
147 #
148 # Higher interleave factors do have a negative impact on Westmere
149 # performance. While for ECB mode it's negligible (~1.5%), other
150 # parallelizable modes perform ~5% worse, which is outweighed by the ~25%
151 # improvement on Sandy Bridge. To balance the regression on Westmere,
152 # CTR mode was implemented with a 6x aesenc interleave factor.
153
154 # April 2011
155 #
156 # Add aesni_xts_[en|de]crypt. Westmere spends 1.25 cycles processing
157 # one byte out of 8KB with 128-bit key, Sandy Bridge - 0.90. Just like
158 # in CTR mode AES instruction interleave factor was chosen to be 6x.
159
160 # November 2015
161 #
162 # Add aesni_ocb_[en|de]crypt. AES instruction interleave factor was
163 # chosen to be 6x.
164
165 ######################################################################
166 # Current large-block performance in cycles per byte processed with
167 # 128-bit key (less is better).
168 #
169 # CBC en-/decrypt CTR XTS ECB OCB
170 # Westmere 3.77/1.25 1.25 1.25 1.26
171 # * Bridge 5.07/0.74 0.75 0.90 0.85 0.98
172 # Haswell 4.44/0.63 0.63 0.73 0.63 0.70
173 # Skylake 2.62/0.63 0.63 0.63 0.63
174 # Silvermont 5.75/3.54 3.56 4.12 3.87(*) 4.11
175 # Bulldozer 5.77/0.70 0.72 0.90 0.70 0.95
176 #
177 # (*) Atom Silvermont ECB result is suboptimal because of penalties
178 # incurred by operations on %xmm8-15. As ECB is not considered
179 # critical, nothing was done to mitigate the problem.
180
181 $PREFIX="aesni"; # if $PREFIX is set to "AES", the script
182 # generates drop-in replacement for
183 # crypto/aes/asm/aes-x86_64.pl:-)
184
185 $flavour = shift;
186 $output = shift;
187 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
188
189 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
190
191 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
192 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
193 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
194 die "can't locate x86_64-xlate.pl";
195
196 open OUT,"| \"$^X\" $xlate $flavour $output";
197 *STDOUT=*OUT;
198
199 $movkey = $PREFIX eq "aesni" ? "movups" : "movups";
200 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order
201 ("%rdi","%rsi","%rdx","%rcx"); # Unix order
202
203 $code=".text\n";
204 $code.=".extern OPENSSL_ia32cap_P\n";
205
206 $rounds="%eax"; # input to and changed by aesni_[en|de]cryptN !!!
207 # this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
208 $inp="%rdi";
209 $out="%rsi";
210 $len="%rdx";
211 $key="%rcx"; # input to and changed by aesni_[en|de]cryptN !!!
212 $ivp="%r8"; # cbc, ctr, ...
213
214 $rnds_="%r10d"; # backup copy for $rounds
215 $key_="%r11"; # backup copy for $key
216
217 # %xmm register layout
218 $rndkey0="%xmm0"; $rndkey1="%xmm1";
219 $inout0="%xmm2"; $inout1="%xmm3";
220 $inout2="%xmm4"; $inout3="%xmm5";
221 $inout4="%xmm6"; $inout5="%xmm7";
222 $inout6="%xmm8"; $inout7="%xmm9";
223
224 $in2="%xmm6"; $in1="%xmm7"; # used in CBC decrypt, CTR, ...
225 $in0="%xmm8"; $iv="%xmm9";
226 \f
227 # Inline version of internal aesni_[en|de]crypt1.
228 #
229 # Why a folded loop? Because aes[enc|dec] is slow enough to accommodate the
230 # cycles which take care of the loop variables...
231 { my $sn;
232 sub aesni_generate1 {
233 my ($p,$key,$rounds,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
234 ++$sn;
235 $code.=<<___;
236 $movkey ($key),$rndkey0
237 $movkey 16($key),$rndkey1
238 ___
239 $code.=<<___ if (defined($ivec));
240 xorps $rndkey0,$ivec
241 lea 32($key),$key
242 xorps $ivec,$inout
243 ___
244 $code.=<<___ if (!defined($ivec));
245 lea 32($key),$key
246 xorps $rndkey0,$inout
247 ___
248 $code.=<<___;
249 .Loop_${p}1_$sn:
250 aes${p} $rndkey1,$inout
251 dec $rounds
252 $movkey ($key),$rndkey1
253 lea 16($key),$key
254 jnz .Loop_${p}1_$sn # loop body is 16 bytes
255 aes${p}last $rndkey1,$inout
256 ___
257 }}
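#
# For reference, with $p="enc", the default $inout0 and no $ivec, one
# instantiation of the template above expands to roughly the following
# (a sketch; the label suffix $sn and the concrete registers depend on
# the call site):
#
#	movups	($key),$rndkey0		# round[0]
#	movups	16($key),$rndkey1	# round[1]
#	lea	32($key),$key
#	xorps	$rndkey0,$inout0	# input ^= round[0]
# .Loop_enc1_N:
#	aesenc	$rndkey1,$inout0
#	dec	$rounds			# loop $rounds times
#	movups	($key),$rndkey1		# fetch next round key
#	lea	16($key),$key
#	jnz	.Loop_enc1_N
#	aesenclast	$rndkey1,$inout0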
258 # void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
259 #
260 { my ($inp,$out,$key) = @_4args;
261
262 $code.=<<___;
263 .globl ${PREFIX}_encrypt
264 .type ${PREFIX}_encrypt,\@abi-omnipotent
265 .align 16
266 ${PREFIX}_encrypt:
267 movups ($inp),$inout0 # load input
268 mov 240($key),$rounds # key->rounds
269 ___
270 &aesni_generate1("enc",$key,$rounds);
271 $code.=<<___;
272 pxor $rndkey0,$rndkey0 # clear register bank
273 pxor $rndkey1,$rndkey1
274 movups $inout0,($out) # output
275 pxor $inout0,$inout0
276 ret
277 .size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
278
279 .globl ${PREFIX}_decrypt
280 .type ${PREFIX}_decrypt,\@abi-omnipotent
281 .align 16
282 ${PREFIX}_decrypt:
283 movups ($inp),$inout0 # load input
284 mov 240($key),$rounds # key->rounds
285 ___
286 &aesni_generate1("dec",$key,$rounds);
287 $code.=<<___;
288 pxor $rndkey0,$rndkey0 # clear register bank
289 pxor $rndkey1,$rndkey1
290 movups $inout0,($out) # output
291 pxor $inout0,$inout0
292 ret
293 .size ${PREFIX}_decrypt, .-${PREFIX}_decrypt
294 ___
295 }
296 \f
297 # _aesni_[en|de]cryptN are private interfaces, N denotes interleave
298 # factor. Why were 3x subroutines originally used in loops? Even though
299 # aes[enc|dec] latency was originally 6, it could be scheduled only
300 # every *2nd* cycle. Thus 3x interleave was the one providing optimal
301 # utilization, i.e. the subroutine's throughput is virtually the same as
302 # that of a non-interleaved subroutine [for up to 3 input blocks].
303 # This is why it originally made no sense to implement a 2x subroutine.
304 # But times change and it became appropriate to spend an extra 192 bytes
305 # on a 2x subroutine on Atom Silvermont's account. For processors that
306 # can schedule aes[enc|dec] every cycle the optimal interleave factor
307 # equals the corresponding instruction's latency. 8x is optimal for
308 # *Bridge and "super-optimal" for other Intel CPUs...
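#
# Put as arithmetic (a sketch based on the figures quoted earlier): the
# optimal interleave factor is roughly latency / issue interval, i.e.
# 6/2 = 3 independent blocks in flight on Westmere and 8/1 = 8 on Sandy
# Bridge, which is why the interleave factors below stop at 8.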
309
310 sub aesni_generate2 {
311 my $dir=shift;
312 # As already mentioned it takes in $key and $rounds, which are *not*
313 # preserved. $inout[0-1] is cipher/clear text...
314 $code.=<<___;
315 .type _aesni_${dir}rypt2,\@abi-omnipotent
316 .align 16
317 _aesni_${dir}rypt2:
318 $movkey ($key),$rndkey0
319 shl \$4,$rounds
320 $movkey 16($key),$rndkey1
321 xorps $rndkey0,$inout0
322 xorps $rndkey0,$inout1
323 $movkey 32($key),$rndkey0
324 lea 32($key,$rounds),$key
325 neg %rax # $rounds
326 add \$16,%rax
327
328 .L${dir}_loop2:
329 aes${dir} $rndkey1,$inout0
330 aes${dir} $rndkey1,$inout1
331 $movkey ($key,%rax),$rndkey1
332 add \$32,%rax
333 aes${dir} $rndkey0,$inout0
334 aes${dir} $rndkey0,$inout1
335 $movkey -16($key,%rax),$rndkey0
336 jnz .L${dir}_loop2
337
338 aes${dir} $rndkey1,$inout0
339 aes${dir} $rndkey1,$inout1
340 aes${dir}last $rndkey0,$inout0
341 aes${dir}last $rndkey0,$inout1
342 ret
343 .size _aesni_${dir}rypt2,.-_aesni_${dir}rypt2
344 ___
345 }
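#
# A worked example of the round-key addressing above (a sketch, for a
# 128-bit key): for the dec/jnz loop in aesni_generate1 to apply the
# right number of rounds, 240($key) must hold the aes[enc|dec] round
# count with the final round excluded, i.e. 9/11/13. With 9, "shl \$4"
# gives 144, the lea leaves $key pointing 16 bytes past the last round
# key, and %rax starts at -144+16 = -128. Each loop iteration loads the
# next two round keys off ($key,%rax) and advances %rax by 32, so after
# four iterations %rax reaches zero and the loop falls through with
# $rndkey1/$rndkey0 holding the second-to-last/last round keys for the
# tail. The same "twisted rounds" trick is used by the other _aesni_*N
# subroutines below.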
346 sub aesni_generate3 {
347 my $dir=shift;
348 # As already mentioned it takes in $key and $rounds, which are *not*
349 # preserved. $inout[0-2] is cipher/clear text...
350 $code.=<<___;
351 .type _aesni_${dir}rypt3,\@abi-omnipotent
352 .align 16
353 _aesni_${dir}rypt3:
354 $movkey ($key),$rndkey0
355 shl \$4,$rounds
356 $movkey 16($key),$rndkey1
357 xorps $rndkey0,$inout0
358 xorps $rndkey0,$inout1
359 xorps $rndkey0,$inout2
360 $movkey 32($key),$rndkey0
361 lea 32($key,$rounds),$key
362 neg %rax # $rounds
363 add \$16,%rax
364
365 .L${dir}_loop3:
366 aes${dir} $rndkey1,$inout0
367 aes${dir} $rndkey1,$inout1
368 aes${dir} $rndkey1,$inout2
369 $movkey ($key,%rax),$rndkey1
370 add \$32,%rax
371 aes${dir} $rndkey0,$inout0
372 aes${dir} $rndkey0,$inout1
373 aes${dir} $rndkey0,$inout2
374 $movkey -16($key,%rax),$rndkey0
375 jnz .L${dir}_loop3
376
377 aes${dir} $rndkey1,$inout0
378 aes${dir} $rndkey1,$inout1
379 aes${dir} $rndkey1,$inout2
380 aes${dir}last $rndkey0,$inout0
381 aes${dir}last $rndkey0,$inout1
382 aes${dir}last $rndkey0,$inout2
383 ret
384 .size _aesni_${dir}rypt3,.-_aesni_${dir}rypt3
385 ___
386 }
387 # 4x interleave is implemented to improve small-block performance,
388 # most notably [and naturally] 4-block by ~30%. One can argue that one
389 # should have implemented 5x as well, but improvement would be <20%,
390 # so it's not worth it...
391 sub aesni_generate4 {
392 my $dir=shift;
393 # As already mentioned it takes in $key and $rounds, which are *not*
394 # preserved. $inout[0-3] is cipher/clear text...
395 $code.=<<___;
396 .type _aesni_${dir}rypt4,\@abi-omnipotent
397 .align 16
398 _aesni_${dir}rypt4:
399 $movkey ($key),$rndkey0
400 shl \$4,$rounds
401 $movkey 16($key),$rndkey1
402 xorps $rndkey0,$inout0
403 xorps $rndkey0,$inout1
404 xorps $rndkey0,$inout2
405 xorps $rndkey0,$inout3
406 $movkey 32($key),$rndkey0
407 lea 32($key,$rounds),$key
408 neg %rax # $rounds
409 .byte 0x0f,0x1f,0x00
410 add \$16,%rax
411
412 .L${dir}_loop4:
413 aes${dir} $rndkey1,$inout0
414 aes${dir} $rndkey1,$inout1
415 aes${dir} $rndkey1,$inout2
416 aes${dir} $rndkey1,$inout3
417 $movkey ($key,%rax),$rndkey1
418 add \$32,%rax
419 aes${dir} $rndkey0,$inout0
420 aes${dir} $rndkey0,$inout1
421 aes${dir} $rndkey0,$inout2
422 aes${dir} $rndkey0,$inout3
423 $movkey -16($key,%rax),$rndkey0
424 jnz .L${dir}_loop4
425
426 aes${dir} $rndkey1,$inout0
427 aes${dir} $rndkey1,$inout1
428 aes${dir} $rndkey1,$inout2
429 aes${dir} $rndkey1,$inout3
430 aes${dir}last $rndkey0,$inout0
431 aes${dir}last $rndkey0,$inout1
432 aes${dir}last $rndkey0,$inout2
433 aes${dir}last $rndkey0,$inout3
434 ret
435 .size _aesni_${dir}rypt4,.-_aesni_${dir}rypt4
436 ___
437 }
438 sub aesni_generate6 {
439 my $dir=shift;
440 # As already mentioned it takes in $key and $rounds, which are *not*
441 # preserved. $inout[0-5] is cipher/clear text...
442 $code.=<<___;
443 .type _aesni_${dir}rypt6,\@abi-omnipotent
444 .align 16
445 _aesni_${dir}rypt6:
446 $movkey ($key),$rndkey0
447 shl \$4,$rounds
448 $movkey 16($key),$rndkey1
449 xorps $rndkey0,$inout0
450 pxor $rndkey0,$inout1
451 pxor $rndkey0,$inout2
452 aes${dir} $rndkey1,$inout0
453 lea 32($key,$rounds),$key
454 neg %rax # $rounds
455 aes${dir} $rndkey1,$inout1
456 pxor $rndkey0,$inout3
457 pxor $rndkey0,$inout4
458 aes${dir} $rndkey1,$inout2
459 pxor $rndkey0,$inout5
460 $movkey ($key,%rax),$rndkey0
461 add \$16,%rax
462 jmp .L${dir}_loop6_enter
463 .align 16
464 .L${dir}_loop6:
465 aes${dir} $rndkey1,$inout0
466 aes${dir} $rndkey1,$inout1
467 aes${dir} $rndkey1,$inout2
468 .L${dir}_loop6_enter:
469 aes${dir} $rndkey1,$inout3
470 aes${dir} $rndkey1,$inout4
471 aes${dir} $rndkey1,$inout5
472 $movkey ($key,%rax),$rndkey1
473 add \$32,%rax
474 aes${dir} $rndkey0,$inout0
475 aes${dir} $rndkey0,$inout1
476 aes${dir} $rndkey0,$inout2
477 aes${dir} $rndkey0,$inout3
478 aes${dir} $rndkey0,$inout4
479 aes${dir} $rndkey0,$inout5
480 $movkey -16($key,%rax),$rndkey0
481 jnz .L${dir}_loop6
482
483 aes${dir} $rndkey1,$inout0
484 aes${dir} $rndkey1,$inout1
485 aes${dir} $rndkey1,$inout2
486 aes${dir} $rndkey1,$inout3
487 aes${dir} $rndkey1,$inout4
488 aes${dir} $rndkey1,$inout5
489 aes${dir}last $rndkey0,$inout0
490 aes${dir}last $rndkey0,$inout1
491 aes${dir}last $rndkey0,$inout2
492 aes${dir}last $rndkey0,$inout3
493 aes${dir}last $rndkey0,$inout4
494 aes${dir}last $rndkey0,$inout5
495 ret
496 .size _aesni_${dir}rypt6,.-_aesni_${dir}rypt6
497 ___
498 }
499 sub aesni_generate8 {
500 my $dir=shift;
501 # As already mentioned it takes in $key and $rounds, which are *not*
502 # preserved. $inout[0-7] is cipher/clear text...
503 $code.=<<___;
504 .type _aesni_${dir}rypt8,\@abi-omnipotent
505 .align 16
506 _aesni_${dir}rypt8:
507 $movkey ($key),$rndkey0
508 shl \$4,$rounds
509 $movkey 16($key),$rndkey1
510 xorps $rndkey0,$inout0
511 xorps $rndkey0,$inout1
512 pxor $rndkey0,$inout2
513 pxor $rndkey0,$inout3
514 pxor $rndkey0,$inout4
515 lea 32($key,$rounds),$key
516 neg %rax # $rounds
517 aes${dir} $rndkey1,$inout0
518 pxor $rndkey0,$inout5
519 pxor $rndkey0,$inout6
520 aes${dir} $rndkey1,$inout1
521 pxor $rndkey0,$inout7
522 $movkey ($key,%rax),$rndkey0
523 add \$16,%rax
524 jmp .L${dir}_loop8_inner
525 .align 16
526 .L${dir}_loop8:
527 aes${dir} $rndkey1,$inout0
528 aes${dir} $rndkey1,$inout1
529 .L${dir}_loop8_inner:
530 aes${dir} $rndkey1,$inout2
531 aes${dir} $rndkey1,$inout3
532 aes${dir} $rndkey1,$inout4
533 aes${dir} $rndkey1,$inout5
534 aes${dir} $rndkey1,$inout6
535 aes${dir} $rndkey1,$inout7
536 .L${dir}_loop8_enter:
537 $movkey ($key,%rax),$rndkey1
538 add \$32,%rax
539 aes${dir} $rndkey0,$inout0
540 aes${dir} $rndkey0,$inout1
541 aes${dir} $rndkey0,$inout2
542 aes${dir} $rndkey0,$inout3
543 aes${dir} $rndkey0,$inout4
544 aes${dir} $rndkey0,$inout5
545 aes${dir} $rndkey0,$inout6
546 aes${dir} $rndkey0,$inout7
547 $movkey -16($key,%rax),$rndkey0
548 jnz .L${dir}_loop8
549
550 aes${dir} $rndkey1,$inout0
551 aes${dir} $rndkey1,$inout1
552 aes${dir} $rndkey1,$inout2
553 aes${dir} $rndkey1,$inout3
554 aes${dir} $rndkey1,$inout4
555 aes${dir} $rndkey1,$inout5
556 aes${dir} $rndkey1,$inout6
557 aes${dir} $rndkey1,$inout7
558 aes${dir}last $rndkey0,$inout0
559 aes${dir}last $rndkey0,$inout1
560 aes${dir}last $rndkey0,$inout2
561 aes${dir}last $rndkey0,$inout3
562 aes${dir}last $rndkey0,$inout4
563 aes${dir}last $rndkey0,$inout5
564 aes${dir}last $rndkey0,$inout6
565 aes${dir}last $rndkey0,$inout7
566 ret
567 .size _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
568 ___
569 }
570 &aesni_generate2("enc") if ($PREFIX eq "aesni");
571 &aesni_generate2("dec");
572 &aesni_generate3("enc") if ($PREFIX eq "aesni");
573 &aesni_generate3("dec");
574 &aesni_generate4("enc") if ($PREFIX eq "aesni");
575 &aesni_generate4("dec");
576 &aesni_generate6("enc") if ($PREFIX eq "aesni");
577 &aesni_generate6("dec");
578 &aesni_generate8("enc") if ($PREFIX eq "aesni");
579 &aesni_generate8("dec");
580 \f
581 if ($PREFIX eq "aesni") {
582 ########################################################################
583 # void aesni_ecb_encrypt (const void *in, void *out,
584 # size_t length, const AES_KEY *key,
585 # int enc);
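# (length is in bytes and is rounded down to a whole number of 16-byte
# blocks; a non-zero enc selects encryption, zero selects decryption --
# cf. the "and \$-16,$len" and "test %r8d,%r8d" instructions below)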
586 $code.=<<___;
587 .globl aesni_ecb_encrypt
588 .type aesni_ecb_encrypt,\@function,5
589 .align 16
590 aesni_ecb_encrypt:
591 ___
592 $code.=<<___ if ($win64);
593 lea -0x58(%rsp),%rsp
594 movaps %xmm6,(%rsp) # offload $inout4..7
595 movaps %xmm7,0x10(%rsp)
596 movaps %xmm8,0x20(%rsp)
597 movaps %xmm9,0x30(%rsp)
598 .Lecb_enc_body:
599 ___
600 $code.=<<___;
601 and \$-16,$len # if ($len<16)
602 jz .Lecb_ret # return
603
604 mov 240($key),$rounds # key->rounds
605 $movkey ($key),$rndkey0
606 mov $key,$key_ # backup $key
607 mov $rounds,$rnds_ # backup $rounds
608 test %r8d,%r8d # 5th argument
609 jz .Lecb_decrypt
610 #--------------------------- ECB ENCRYPT ------------------------------#
611 cmp \$0x80,$len # if ($len<8*16)
612 jb .Lecb_enc_tail # short input
613
614 movdqu ($inp),$inout0 # load 8 input blocks
615 movdqu 0x10($inp),$inout1
616 movdqu 0x20($inp),$inout2
617 movdqu 0x30($inp),$inout3
618 movdqu 0x40($inp),$inout4
619 movdqu 0x50($inp),$inout5
620 movdqu 0x60($inp),$inout6
621 movdqu 0x70($inp),$inout7
622 lea 0x80($inp),$inp # $inp+=8*16
623 sub \$0x80,$len # $len-=8*16 (can be zero)
624 jmp .Lecb_enc_loop8_enter
625 .align 16
626 .Lecb_enc_loop8:
627 movups $inout0,($out) # store 8 output blocks
628 mov $key_,$key # restore $key
629 movdqu ($inp),$inout0 # load 8 input blocks
630 mov $rnds_,$rounds # restore $rounds
631 movups $inout1,0x10($out)
632 movdqu 0x10($inp),$inout1
633 movups $inout2,0x20($out)
634 movdqu 0x20($inp),$inout2
635 movups $inout3,0x30($out)
636 movdqu 0x30($inp),$inout3
637 movups $inout4,0x40($out)
638 movdqu 0x40($inp),$inout4
639 movups $inout5,0x50($out)
640 movdqu 0x50($inp),$inout5
641 movups $inout6,0x60($out)
642 movdqu 0x60($inp),$inout6
643 movups $inout7,0x70($out)
644 lea 0x80($out),$out # $out+=8*16
645 movdqu 0x70($inp),$inout7
646 lea 0x80($inp),$inp # $inp+=8*16
647 .Lecb_enc_loop8_enter:
648
649 call _aesni_encrypt8
650
651 sub \$0x80,$len
652 jnc .Lecb_enc_loop8 # loop if $len-=8*16 didn't borrow
653
654 movups $inout0,($out) # store 8 output blocks
655 mov $key_,$key # restore $key
656 movups $inout1,0x10($out)
657 mov $rnds_,$rounds # restore $rounds
658 movups $inout2,0x20($out)
659 movups $inout3,0x30($out)
660 movups $inout4,0x40($out)
661 movups $inout5,0x50($out)
662 movups $inout6,0x60($out)
663 movups $inout7,0x70($out)
664 lea 0x80($out),$out # $out+=8*16
665 add \$0x80,$len # restore real remaining $len
666 jz .Lecb_ret # done if ($len==0)
667
668 .Lecb_enc_tail: # $len is less than 8*16
669 movups ($inp),$inout0
670 cmp \$0x20,$len
671 jb .Lecb_enc_one
672 movups 0x10($inp),$inout1
673 je .Lecb_enc_two
674 movups 0x20($inp),$inout2
675 cmp \$0x40,$len
676 jb .Lecb_enc_three
677 movups 0x30($inp),$inout3
678 je .Lecb_enc_four
679 movups 0x40($inp),$inout4
680 cmp \$0x60,$len
681 jb .Lecb_enc_five
682 movups 0x50($inp),$inout5
683 je .Lecb_enc_six
684 movdqu 0x60($inp),$inout6
685 xorps $inout7,$inout7
686 call _aesni_encrypt8
687 movups $inout0,($out) # store 7 output blocks
688 movups $inout1,0x10($out)
689 movups $inout2,0x20($out)
690 movups $inout3,0x30($out)
691 movups $inout4,0x40($out)
692 movups $inout5,0x50($out)
693 movups $inout6,0x60($out)
694 jmp .Lecb_ret
695 .align 16
696 .Lecb_enc_one:
697 ___
698 &aesni_generate1("enc",$key,$rounds);
699 $code.=<<___;
700 movups $inout0,($out) # store one output block
701 jmp .Lecb_ret
702 .align 16
703 .Lecb_enc_two:
704 call _aesni_encrypt2
705 movups $inout0,($out) # store 2 output blocks
706 movups $inout1,0x10($out)
707 jmp .Lecb_ret
708 .align 16
709 .Lecb_enc_three:
710 call _aesni_encrypt3
711 movups $inout0,($out) # store 3 output blocks
712 movups $inout1,0x10($out)
713 movups $inout2,0x20($out)
714 jmp .Lecb_ret
715 .align 16
716 .Lecb_enc_four:
717 call _aesni_encrypt4
718 movups $inout0,($out) # store 4 output blocks
719 movups $inout1,0x10($out)
720 movups $inout2,0x20($out)
721 movups $inout3,0x30($out)
722 jmp .Lecb_ret
723 .align 16
724 .Lecb_enc_five:
725 xorps $inout5,$inout5
726 call _aesni_encrypt6
727 movups $inout0,($out) # store 5 output blocks
728 movups $inout1,0x10($out)
729 movups $inout2,0x20($out)
730 movups $inout3,0x30($out)
731 movups $inout4,0x40($out)
732 jmp .Lecb_ret
733 .align 16
734 .Lecb_enc_six:
735 call _aesni_encrypt6
736 movups $inout0,($out) # store 6 output blocks
737 movups $inout1,0x10($out)
738 movups $inout2,0x20($out)
739 movups $inout3,0x30($out)
740 movups $inout4,0x40($out)
741 movups $inout5,0x50($out)
742 jmp .Lecb_ret
743 \f#--------------------------- ECB DECRYPT ------------------------------#
744 .align 16
745 .Lecb_decrypt:
746 cmp \$0x80,$len # if ($len<8*16)
747 jb .Lecb_dec_tail # short input
748
749 movdqu ($inp),$inout0 # load 8 input blocks
750 movdqu 0x10($inp),$inout1
751 movdqu 0x20($inp),$inout2
752 movdqu 0x30($inp),$inout3
753 movdqu 0x40($inp),$inout4
754 movdqu 0x50($inp),$inout5
755 movdqu 0x60($inp),$inout6
756 movdqu 0x70($inp),$inout7
757 lea 0x80($inp),$inp # $inp+=8*16
758 sub \$0x80,$len # $len-=8*16 (can be zero)
759 jmp .Lecb_dec_loop8_enter
760 .align 16
761 .Lecb_dec_loop8:
762 movups $inout0,($out) # store 8 output blocks
763 mov $key_,$key # restore $key
764 movdqu ($inp),$inout0 # load 8 input blocks
765 mov $rnds_,$rounds # restore $rounds
766 movups $inout1,0x10($out)
767 movdqu 0x10($inp),$inout1
768 movups $inout2,0x20($out)
769 movdqu 0x20($inp),$inout2
770 movups $inout3,0x30($out)
771 movdqu 0x30($inp),$inout3
772 movups $inout4,0x40($out)
773 movdqu 0x40($inp),$inout4
774 movups $inout5,0x50($out)
775 movdqu 0x50($inp),$inout5
776 movups $inout6,0x60($out)
777 movdqu 0x60($inp),$inout6
778 movups $inout7,0x70($out)
779 lea 0x80($out),$out # $out+=8*16
780 movdqu 0x70($inp),$inout7
781 lea 0x80($inp),$inp # $inp+=8*16
782 .Lecb_dec_loop8_enter:
783
784 call _aesni_decrypt8
785
786 $movkey ($key_),$rndkey0
787 sub \$0x80,$len
788 jnc .Lecb_dec_loop8 # loop if $len-=8*16 didn't borrow
789
790 movups $inout0,($out) # store 8 output blocks
791 pxor $inout0,$inout0 # clear register bank
792 mov $key_,$key # restore $key
793 movups $inout1,0x10($out)
794 pxor $inout1,$inout1
795 mov $rnds_,$rounds # restore $rounds
796 movups $inout2,0x20($out)
797 pxor $inout2,$inout2
798 movups $inout3,0x30($out)
799 pxor $inout3,$inout3
800 movups $inout4,0x40($out)
801 pxor $inout4,$inout4
802 movups $inout5,0x50($out)
803 pxor $inout5,$inout5
804 movups $inout6,0x60($out)
805 pxor $inout6,$inout6
806 movups $inout7,0x70($out)
807 pxor $inout7,$inout7
808 lea 0x80($out),$out # $out+=8*16
809 add \$0x80,$len # restore real remaining $len
810 jz .Lecb_ret # done if ($len==0)
811
812 .Lecb_dec_tail:
813 movups ($inp),$inout0
814 cmp \$0x20,$len
815 jb .Lecb_dec_one
816 movups 0x10($inp),$inout1
817 je .Lecb_dec_two
818 movups 0x20($inp),$inout2
819 cmp \$0x40,$len
820 jb .Lecb_dec_three
821 movups 0x30($inp),$inout3
822 je .Lecb_dec_four
823 movups 0x40($inp),$inout4
824 cmp \$0x60,$len
825 jb .Lecb_dec_five
826 movups 0x50($inp),$inout5
827 je .Lecb_dec_six
828 movups 0x60($inp),$inout6
829 $movkey ($key),$rndkey0
830 xorps $inout7,$inout7
831 call _aesni_decrypt8
832 movups $inout0,($out) # store 7 output blocks
833 pxor $inout0,$inout0 # clear register bank
834 movups $inout1,0x10($out)
835 pxor $inout1,$inout1
836 movups $inout2,0x20($out)
837 pxor $inout2,$inout2
838 movups $inout3,0x30($out)
839 pxor $inout3,$inout3
840 movups $inout4,0x40($out)
841 pxor $inout4,$inout4
842 movups $inout5,0x50($out)
843 pxor $inout5,$inout5
844 movups $inout6,0x60($out)
845 pxor $inout6,$inout6
846 pxor $inout7,$inout7
847 jmp .Lecb_ret
848 .align 16
849 .Lecb_dec_one:
850 ___
851 &aesni_generate1("dec",$key,$rounds);
852 $code.=<<___;
853 movups $inout0,($out) # store one output block
854 pxor $inout0,$inout0 # clear register bank
855 jmp .Lecb_ret
856 .align 16
857 .Lecb_dec_two:
858 call _aesni_decrypt2
859 movups $inout0,($out) # store 2 output blocks
860 pxor $inout0,$inout0 # clear register bank
861 movups $inout1,0x10($out)
862 pxor $inout1,$inout1
863 jmp .Lecb_ret
864 .align 16
865 .Lecb_dec_three:
866 call _aesni_decrypt3
867 movups $inout0,($out) # store 3 output blocks
868 pxor $inout0,$inout0 # clear register bank
869 movups $inout1,0x10($out)
870 pxor $inout1,$inout1
871 movups $inout2,0x20($out)
872 pxor $inout2,$inout2
873 jmp .Lecb_ret
874 .align 16
875 .Lecb_dec_four:
876 call _aesni_decrypt4
877 movups $inout0,($out) # store 4 output blocks
878 pxor $inout0,$inout0 # clear register bank
879 movups $inout1,0x10($out)
880 pxor $inout1,$inout1
881 movups $inout2,0x20($out)
882 pxor $inout2,$inout2
883 movups $inout3,0x30($out)
884 pxor $inout3,$inout3
885 jmp .Lecb_ret
886 .align 16
887 .Lecb_dec_five:
888 xorps $inout5,$inout5
889 call _aesni_decrypt6
890 movups $inout0,($out) # store 5 output blocks
891 pxor $inout0,$inout0 # clear register bank
892 movups $inout1,0x10($out)
893 pxor $inout1,$inout1
894 movups $inout2,0x20($out)
895 pxor $inout2,$inout2
896 movups $inout3,0x30($out)
897 pxor $inout3,$inout3
898 movups $inout4,0x40($out)
899 pxor $inout4,$inout4
900 pxor $inout5,$inout5
901 jmp .Lecb_ret
902 .align 16
903 .Lecb_dec_six:
904 call _aesni_decrypt6
905 movups $inout0,($out) # store 6 output blocks
906 pxor $inout0,$inout0 # clear register bank
907 movups $inout1,0x10($out)
908 pxor $inout1,$inout1
909 movups $inout2,0x20($out)
910 pxor $inout2,$inout2
911 movups $inout3,0x30($out)
912 pxor $inout3,$inout3
913 movups $inout4,0x40($out)
914 pxor $inout4,$inout4
915 movups $inout5,0x50($out)
916 pxor $inout5,$inout5
917
918 .Lecb_ret:
919 xorps $rndkey0,$rndkey0 # %xmm0
920 pxor $rndkey1,$rndkey1
921 ___
922 $code.=<<___ if ($win64);
923 movaps (%rsp),%xmm6
924 movaps %xmm0,(%rsp) # clear stack
925 movaps 0x10(%rsp),%xmm7
926 movaps %xmm0,0x10(%rsp)
927 movaps 0x20(%rsp),%xmm8
928 movaps %xmm0,0x20(%rsp)
929 movaps 0x30(%rsp),%xmm9
930 movaps %xmm0,0x30(%rsp)
931 lea 0x58(%rsp),%rsp
932 .Lecb_enc_ret:
933 ___
934 $code.=<<___;
935 ret
936 .size aesni_ecb_encrypt,.-aesni_ecb_encrypt
937 ___
938 \f
939 {
940 ######################################################################
941 # void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
942 # size_t blocks, const AES_KEY *key,
943 # const char *ivec,char *cmac);
944 #
945 # Handles only complete blocks, operates on 64-bit counter and
946 # does not update *ivec! Nor does it finalize CMAC value
947 # (see engine/eng_aesni.c for details)
948 #
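# In other words the caller (the C glue referenced above) is expected to
# format the B_0/counter blocks, feed only whole 16-byte blocks here and
# finish the CMAC/tag computation itself; these routines merely
# interleave the CTR and CBC-MAC passes over the bulk of the data.
#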
949 {
950 my $cmac="%r9"; # 6th argument
951
952 my $increment="%xmm9";
953 my $iv="%xmm6";
954 my $bswap_mask="%xmm7";
955
956 $code.=<<___;
957 .globl aesni_ccm64_encrypt_blocks
958 .type aesni_ccm64_encrypt_blocks,\@function,6
959 .align 16
960 aesni_ccm64_encrypt_blocks:
961 ___
962 $code.=<<___ if ($win64);
963 lea -0x58(%rsp),%rsp
964 movaps %xmm6,(%rsp) # $iv
965 movaps %xmm7,0x10(%rsp) # $bswap_mask
966 movaps %xmm8,0x20(%rsp) # $in0
967 movaps %xmm9,0x30(%rsp) # $increment
968 .Lccm64_enc_body:
969 ___
970 $code.=<<___;
971 mov 240($key),$rounds # key->rounds
972 movdqu ($ivp),$iv
973 movdqa .Lincrement64(%rip),$increment
974 movdqa .Lbswap_mask(%rip),$bswap_mask
975
976 shl \$4,$rounds
977 mov \$16,$rnds_
978 lea 0($key),$key_
979 movdqu ($cmac),$inout1
980 movdqa $iv,$inout0
981 lea 32($key,$rounds),$key # end of key schedule
982 pshufb $bswap_mask,$iv
983 sub %rax,%r10 # twisted $rounds
984 jmp .Lccm64_enc_outer
985 .align 16
986 .Lccm64_enc_outer:
987 $movkey ($key_),$rndkey0
988 mov %r10,%rax
989 movups ($inp),$in0 # load inp
990
991 xorps $rndkey0,$inout0 # counter
992 $movkey 16($key_),$rndkey1
993 xorps $in0,$rndkey0
994 xorps $rndkey0,$inout1 # cmac^=inp
995 $movkey 32($key_),$rndkey0
996
997 .Lccm64_enc2_loop:
998 aesenc $rndkey1,$inout0
999 aesenc $rndkey1,$inout1
1000 $movkey ($key,%rax),$rndkey1
1001 add \$32,%rax
1002 aesenc $rndkey0,$inout0
1003 aesenc $rndkey0,$inout1
1004 $movkey -16($key,%rax),$rndkey0
1005 jnz .Lccm64_enc2_loop
1006 aesenc $rndkey1,$inout0
1007 aesenc $rndkey1,$inout1
1008 paddq $increment,$iv
1009 dec $len # $len-- ($len is in blocks)
1010 aesenclast $rndkey0,$inout0
1011 aesenclast $rndkey0,$inout1
1012
1013 lea 16($inp),$inp
1014 xorps $inout0,$in0 # inp ^= E(iv)
1015 movdqa $iv,$inout0
1016 movups $in0,($out) # save output
1017 pshufb $bswap_mask,$inout0
1018 lea 16($out),$out # $out+=16
1019 jnz .Lccm64_enc_outer # loop if ($len!=0)
1020
1021 pxor $rndkey0,$rndkey0 # clear register bank
1022 pxor $rndkey1,$rndkey1
1023 pxor $inout0,$inout0
1024 movups $inout1,($cmac) # store resulting mac
1025 pxor $inout1,$inout1
1026 pxor $in0,$in0
1027 pxor $iv,$iv
1028 ___
1029 $code.=<<___ if ($win64);
1030 movaps (%rsp),%xmm6
1031 movaps %xmm0,(%rsp) # clear stack
1032 movaps 0x10(%rsp),%xmm7
1033 movaps %xmm0,0x10(%rsp)
1034 movaps 0x20(%rsp),%xmm8
1035 movaps %xmm0,0x20(%rsp)
1036 movaps 0x30(%rsp),%xmm9
1037 movaps %xmm0,0x30(%rsp)
1038 lea 0x58(%rsp),%rsp
1039 .Lccm64_enc_ret:
1040 ___
1041 $code.=<<___;
1042 ret
1043 .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
1044 ___
1045 ######################################################################
1046 $code.=<<___;
1047 .globl aesni_ccm64_decrypt_blocks
1048 .type aesni_ccm64_decrypt_blocks,\@function,6
1049 .align 16
1050 aesni_ccm64_decrypt_blocks:
1051 ___
1052 $code.=<<___ if ($win64);
1053 lea -0x58(%rsp),%rsp
1054 movaps %xmm6,(%rsp) # $iv
1055 movaps %xmm7,0x10(%rsp) # $bswap_mask
1056 movaps %xmm8,0x20(%rsp) # $in8
1057 movaps %xmm9,0x30(%rsp) # $increment
1058 .Lccm64_dec_body:
1059 ___
1060 $code.=<<___;
1061 mov 240($key),$rounds # key->rounds
1062 movups ($ivp),$iv
1063 movdqu ($cmac),$inout1
1064 movdqa .Lincrement64(%rip),$increment
1065 movdqa .Lbswap_mask(%rip),$bswap_mask
1066
1067 movaps $iv,$inout0
1068 mov $rounds,$rnds_
1069 mov $key,$key_
1070 pshufb $bswap_mask,$iv
1071 ___
1072 &aesni_generate1("enc",$key,$rounds);
1073 $code.=<<___;
1074 shl \$4,$rnds_
1075 mov \$16,$rounds
1076 movups ($inp),$in0 # load inp
1077 paddq $increment,$iv
1078 lea 16($inp),$inp # $inp+=16
1079 sub %r10,%rax # twisted $rounds
1080 lea 32($key_,$rnds_),$key # end of key schedule
1081 mov %rax,%r10
1082 jmp .Lccm64_dec_outer
1083 .align 16
1084 .Lccm64_dec_outer:
1085 xorps $inout0,$in0 # inp ^= E(iv)
1086 movdqa $iv,$inout0
1087 movups $in0,($out) # save output
1088 lea 16($out),$out # $out+=16
1089 pshufb $bswap_mask,$inout0
1090
1091 sub \$1,$len # $len-- ($len is in blocks)
1092 jz .Lccm64_dec_break # if ($len==0) break
1093
1094 $movkey ($key_),$rndkey0
1095 mov %r10,%rax
1096 $movkey 16($key_),$rndkey1
1097 xorps $rndkey0,$in0
1098 xorps $rndkey0,$inout0
1099 xorps $in0,$inout1 # cmac^=out
1100 $movkey 32($key_),$rndkey0
1101 jmp .Lccm64_dec2_loop
1102 .align 16
1103 .Lccm64_dec2_loop:
1104 aesenc $rndkey1,$inout0
1105 aesenc $rndkey1,$inout1
1106 $movkey ($key,%rax),$rndkey1
1107 add \$32,%rax
1108 aesenc $rndkey0,$inout0
1109 aesenc $rndkey0,$inout1
1110 $movkey -16($key,%rax),$rndkey0
1111 jnz .Lccm64_dec2_loop
1112 movups ($inp),$in0 # load input
1113 paddq $increment,$iv
1114 aesenc $rndkey1,$inout0
1115 aesenc $rndkey1,$inout1
1116 aesenclast $rndkey0,$inout0
1117 aesenclast $rndkey0,$inout1
1118 lea 16($inp),$inp # $inp+=16
1119 jmp .Lccm64_dec_outer
1120
1121 .align 16
1122 .Lccm64_dec_break:
1123 #xorps $in0,$inout1 # cmac^=out
1124 mov 240($key_),$rounds
1125 ___
1126 &aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
1127 $code.=<<___;
1128 pxor $rndkey0,$rndkey0 # clear register bank
1129 pxor $rndkey1,$rndkey1
1130 pxor $inout0,$inout0
1131 movups $inout1,($cmac) # store resulting mac
1132 pxor $inout1,$inout1
1133 pxor $in0,$in0
1134 pxor $iv,$iv
1135 ___
1136 $code.=<<___ if ($win64);
1137 movaps (%rsp),%xmm6
1138 movaps %xmm0,(%rsp) # clear stack
1139 movaps 0x10(%rsp),%xmm7
1140 movaps %xmm0,0x10(%rsp)
1141 movaps 0x20(%rsp),%xmm8
1142 movaps %xmm0,0x20(%rsp)
1143 movaps 0x30(%rsp),%xmm9
1144 movaps %xmm0,0x30(%rsp)
1145 lea 0x58(%rsp),%rsp
1146 .Lccm64_dec_ret:
1147 ___
1148 $code.=<<___;
1149 ret
1150 .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
1151 ___
1152 }\f
1153 ######################################################################
1154 # void aesni_ctr32_encrypt_blocks (const void *in, void *out,
1155 # size_t blocks, const AES_KEY *key,
1156 # const char *ivec);
1157 #
1158 # Handles only complete blocks, operates on 32-bit counter and
1159 # does not update *ivec! (see crypto/modes/ctr128.c for details)
1160 #
1161 # Overhaul based on suggestions from Shay Gueron and Vlad Krasnov,
1162 # http://rt.openssl.org/Ticket/Display.html?id=3021&user=guest&pass=guest.
1163 # Keywords are full unroll and modulo-schedule counter calculations
1164 # with zero-round key xor.
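# The "zero-round key xor" part works as follows (a sketch of what the
# code below does): the IV block xor-ed with round[0] is replicated into
# eight 16-byte stack slots once, and for each subsequent block only the
# last four bytes of a slot are patched with bswap(counter+i) xor-ed
# with the last four bytes of round[0]. No full 16-byte xor is needed
# per block, and the counter updates run in the integer pipe alongside
# the aesenc instructions.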
1165 {
1166 my ($in0,$in1,$in2,$in3,$in4,$in5)=map("%xmm$_",(10..15));
1167 my ($key0,$ctr)=("${key_}d","${ivp}d");
1168 my $frame_size = 0x80 + ($win64?160:0);
1169
1170 $code.=<<___;
1171 .globl aesni_ctr32_encrypt_blocks
1172 .type aesni_ctr32_encrypt_blocks,\@function,5
1173 .align 16
1174 aesni_ctr32_encrypt_blocks:
1175 cmp \$1,$len
1176 jne .Lctr32_bulk
1177
1178 # handle single block without allocating stack frame,
1179 # useful when handling edges
1180 movups ($ivp),$inout0
1181 movups ($inp),$inout1
1182 mov 240($key),%edx # key->rounds
1183 ___
1184 &aesni_generate1("enc",$key,"%edx");
1185 $code.=<<___;
1186 pxor $rndkey0,$rndkey0 # clear register bank
1187 pxor $rndkey1,$rndkey1
1188 xorps $inout1,$inout0
1189 pxor $inout1,$inout1
1190 movups $inout0,($out)
1191 xorps $inout0,$inout0
1192 jmp .Lctr32_epilogue
1193
1194 .align 16
1195 .Lctr32_bulk:
1196 lea (%rsp),%rax
1197 push %rbp
1198 sub \$$frame_size,%rsp
1199 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1200 ___
1201 $code.=<<___ if ($win64);
1202 movaps %xmm6,-0xa8(%rax) # offload everything
1203 movaps %xmm7,-0x98(%rax)
1204 movaps %xmm8,-0x88(%rax)
1205 movaps %xmm9,-0x78(%rax)
1206 movaps %xmm10,-0x68(%rax)
1207 movaps %xmm11,-0x58(%rax)
1208 movaps %xmm12,-0x48(%rax)
1209 movaps %xmm13,-0x38(%rax)
1210 movaps %xmm14,-0x28(%rax)
1211 movaps %xmm15,-0x18(%rax)
1212 .Lctr32_body:
1213 ___
1214 $code.=<<___;
1215 lea -8(%rax),%rbp
1216
1217 # 8 16-byte words on top of stack are counter values
1218 # xor-ed with zero-round key
1219
1220 movdqu ($ivp),$inout0
1221 movdqu ($key),$rndkey0
1222 mov 12($ivp),$ctr # counter LSB
1223 pxor $rndkey0,$inout0
1224 mov 12($key),$key0 # 0-round key LSB
1225 movdqa $inout0,0x00(%rsp) # populate counter block
1226 bswap $ctr
1227 movdqa $inout0,$inout1
1228 movdqa $inout0,$inout2
1229 movdqa $inout0,$inout3
1230 movdqa $inout0,0x40(%rsp)
1231 movdqa $inout0,0x50(%rsp)
1232 movdqa $inout0,0x60(%rsp)
1233 mov %rdx,%r10 # about to borrow %rdx
1234 movdqa $inout0,0x70(%rsp)
1235
1236 lea 1($ctr),%rax
1237 lea 2($ctr),%rdx
1238 bswap %eax
1239 bswap %edx
1240 xor $key0,%eax
1241 xor $key0,%edx
1242 pinsrd \$3,%eax,$inout1
1243 lea 3($ctr),%rax
1244 movdqa $inout1,0x10(%rsp)
1245 pinsrd \$3,%edx,$inout2
1246 bswap %eax
1247 mov %r10,%rdx # restore %rdx
1248 lea 4($ctr),%r10
1249 movdqa $inout2,0x20(%rsp)
1250 xor $key0,%eax
1251 bswap %r10d
1252 pinsrd \$3,%eax,$inout3
1253 xor $key0,%r10d
1254 movdqa $inout3,0x30(%rsp)
1255 lea 5($ctr),%r9
1256 mov %r10d,0x40+12(%rsp)
1257 bswap %r9d
1258 lea 6($ctr),%r10
1259 mov 240($key),$rounds # key->rounds
1260 xor $key0,%r9d
1261 bswap %r10d
1262 mov %r9d,0x50+12(%rsp)
1263 xor $key0,%r10d
1264 lea 7($ctr),%r9
1265 mov %r10d,0x60+12(%rsp)
1266 bswap %r9d
1267 mov OPENSSL_ia32cap_P+4(%rip),%r10d
1268 xor $key0,%r9d
1269 and \$`1<<26|1<<22`,%r10d # isolate XSAVE+MOVBE
1270 mov %r9d,0x70+12(%rsp)
1271
1272 $movkey 0x10($key),$rndkey1
1273
1274 movdqa 0x40(%rsp),$inout4
1275 movdqa 0x50(%rsp),$inout5
1276
1277 cmp \$8,$len # $len is in blocks
1278 jb .Lctr32_tail # short input if ($len<8)
1279
1280 sub \$6,$len # $len is biased by -6
1281 cmp \$`1<<22`,%r10d # check for MOVBE without XSAVE
1282 je .Lctr32_6x # [which denotes Atom Silvermont]
1283
1284 lea 0x80($key),$key # size optimization
1285 sub \$2,$len # $len is biased by -8
1286 jmp .Lctr32_loop8
1287
1288 .align 16
1289 .Lctr32_6x:
1290 shl \$4,$rounds
1291 mov \$48,$rnds_
1292 bswap $key0
1293 lea 32($key,$rounds),$key # end of key schedule
1294 sub %rax,%r10 # twisted $rounds
1295 jmp .Lctr32_loop6
1296
1297 .align 16
1298 .Lctr32_loop6:
1299 add \$6,$ctr # next counter value
1300 $movkey -48($key,$rnds_),$rndkey0
1301 aesenc $rndkey1,$inout0
1302 mov $ctr,%eax
1303 xor $key0,%eax
1304 aesenc $rndkey1,$inout1
1305 movbe %eax,`0x00+12`(%rsp) # store next counter value
1306 lea 1($ctr),%eax
1307 aesenc $rndkey1,$inout2
1308 xor $key0,%eax
1309 movbe %eax,`0x10+12`(%rsp)
1310 aesenc $rndkey1,$inout3
1311 lea 2($ctr),%eax
1312 xor $key0,%eax
1313 aesenc $rndkey1,$inout4
1314 movbe %eax,`0x20+12`(%rsp)
1315 lea 3($ctr),%eax
1316 aesenc $rndkey1,$inout5
1317 $movkey -32($key,$rnds_),$rndkey1
1318 xor $key0,%eax
1319
1320 aesenc $rndkey0,$inout0
1321 movbe %eax,`0x30+12`(%rsp)
1322 lea 4($ctr),%eax
1323 aesenc $rndkey0,$inout1
1324 xor $key0,%eax
1325 movbe %eax,`0x40+12`(%rsp)
1326 aesenc $rndkey0,$inout2
1327 lea 5($ctr),%eax
1328 xor $key0,%eax
1329 aesenc $rndkey0,$inout3
1330 movbe %eax,`0x50+12`(%rsp)
1331 mov %r10,%rax # mov $rnds_,$rounds
1332 aesenc $rndkey0,$inout4
1333 aesenc $rndkey0,$inout5
1334 $movkey -16($key,$rnds_),$rndkey0
1335
1336 call .Lenc_loop6
1337
1338 movdqu ($inp),$inout6 # load 6 input blocks
1339 movdqu 0x10($inp),$inout7
1340 movdqu 0x20($inp),$in0
1341 movdqu 0x30($inp),$in1
1342 movdqu 0x40($inp),$in2
1343 movdqu 0x50($inp),$in3
1344 lea 0x60($inp),$inp # $inp+=6*16
1345 $movkey -64($key,$rnds_),$rndkey1
1346 pxor $inout0,$inout6 # inp^=E(ctr)
1347 movaps 0x00(%rsp),$inout0 # load next counter [xor-ed with 0 round]
1348 pxor $inout1,$inout7
1349 movaps 0x10(%rsp),$inout1
1350 pxor $inout2,$in0
1351 movaps 0x20(%rsp),$inout2
1352 pxor $inout3,$in1
1353 movaps 0x30(%rsp),$inout3
1354 pxor $inout4,$in2
1355 movaps 0x40(%rsp),$inout4
1356 pxor $inout5,$in3
1357 movaps 0x50(%rsp),$inout5
1358 movdqu $inout6,($out) # store 6 output blocks
1359 movdqu $inout7,0x10($out)
1360 movdqu $in0,0x20($out)
1361 movdqu $in1,0x30($out)
1362 movdqu $in2,0x40($out)
1363 movdqu $in3,0x50($out)
1364 lea 0x60($out),$out # $out+=6*16
1365
1366 sub \$6,$len
1367 jnc .Lctr32_loop6 # loop if $len-=6 didn't borrow
1368
1369 add \$6,$len # restore real remaining $len
1370 jz .Lctr32_done # done if ($len==0)
1371
1372 lea -48($rnds_),$rounds
1373 lea -80($key,$rnds_),$key # restore $key
1374 neg $rounds
1375 shr \$4,$rounds # restore $rounds
1376 jmp .Lctr32_tail
1377
1378 .align 32
1379 .Lctr32_loop8:
1380 add \$8,$ctr # next counter value
1381 movdqa 0x60(%rsp),$inout6
1382 aesenc $rndkey1,$inout0
1383 mov $ctr,%r9d
1384 movdqa 0x70(%rsp),$inout7
1385 aesenc $rndkey1,$inout1
1386 bswap %r9d
1387 $movkey 0x20-0x80($key),$rndkey0
1388 aesenc $rndkey1,$inout2
1389 xor $key0,%r9d
1390 nop
1391 aesenc $rndkey1,$inout3
1392 mov %r9d,0x00+12(%rsp) # store next counter value
1393 lea 1($ctr),%r9
1394 aesenc $rndkey1,$inout4
1395 aesenc $rndkey1,$inout5
1396 aesenc $rndkey1,$inout6
1397 aesenc $rndkey1,$inout7
1398 $movkey 0x30-0x80($key),$rndkey1
1399 ___
1400 for($i=2;$i<8;$i++) {
1401 my $rndkeyx = ($i&1)?$rndkey1:$rndkey0;
1402 $code.=<<___;
1403 bswap %r9d
1404 aesenc $rndkeyx,$inout0
1405 aesenc $rndkeyx,$inout1
1406 xor $key0,%r9d
1407 .byte 0x66,0x90
1408 aesenc $rndkeyx,$inout2
1409 aesenc $rndkeyx,$inout3
1410 mov %r9d,`0x10*($i-1)`+12(%rsp)
1411 lea $i($ctr),%r9
1412 aesenc $rndkeyx,$inout4
1413 aesenc $rndkeyx,$inout5
1414 aesenc $rndkeyx,$inout6
1415 aesenc $rndkeyx,$inout7
1416 $movkey `0x20+0x10*$i`-0x80($key),$rndkeyx
1417 ___
1418 }
1419 $code.=<<___;
1420 bswap %r9d
1421 aesenc $rndkey0,$inout0
1422 aesenc $rndkey0,$inout1
1423 aesenc $rndkey0,$inout2
1424 xor $key0,%r9d
1425 movdqu 0x00($inp),$in0 # start loading input
1426 aesenc $rndkey0,$inout3
1427 mov %r9d,0x70+12(%rsp)
1428 cmp \$11,$rounds
1429 aesenc $rndkey0,$inout4
1430 aesenc $rndkey0,$inout5
1431 aesenc $rndkey0,$inout6
1432 aesenc $rndkey0,$inout7
1433 $movkey 0xa0-0x80($key),$rndkey0
1434
1435 jb .Lctr32_enc_done
1436
1437 aesenc $rndkey1,$inout0
1438 aesenc $rndkey1,$inout1
1439 aesenc $rndkey1,$inout2
1440 aesenc $rndkey1,$inout3
1441 aesenc $rndkey1,$inout4
1442 aesenc $rndkey1,$inout5
1443 aesenc $rndkey1,$inout6
1444 aesenc $rndkey1,$inout7
1445 $movkey 0xb0-0x80($key),$rndkey1
1446
1447 aesenc $rndkey0,$inout0
1448 aesenc $rndkey0,$inout1
1449 aesenc $rndkey0,$inout2
1450 aesenc $rndkey0,$inout3
1451 aesenc $rndkey0,$inout4
1452 aesenc $rndkey0,$inout5
1453 aesenc $rndkey0,$inout6
1454 aesenc $rndkey0,$inout7
1455 $movkey 0xc0-0x80($key),$rndkey0
1456 je .Lctr32_enc_done
1457
1458 aesenc $rndkey1,$inout0
1459 aesenc $rndkey1,$inout1
1460 aesenc $rndkey1,$inout2
1461 aesenc $rndkey1,$inout3
1462 aesenc $rndkey1,$inout4
1463 aesenc $rndkey1,$inout5
1464 aesenc $rndkey1,$inout6
1465 aesenc $rndkey1,$inout7
1466 $movkey 0xd0-0x80($key),$rndkey1
1467
1468 aesenc $rndkey0,$inout0
1469 aesenc $rndkey0,$inout1
1470 aesenc $rndkey0,$inout2
1471 aesenc $rndkey0,$inout3
1472 aesenc $rndkey0,$inout4
1473 aesenc $rndkey0,$inout5
1474 aesenc $rndkey0,$inout6
1475 aesenc $rndkey0,$inout7
1476 $movkey 0xe0-0x80($key),$rndkey0
1477 jmp .Lctr32_enc_done
1478
1479 .align 16
1480 .Lctr32_enc_done:
1481 movdqu 0x10($inp),$in1
1482 pxor $rndkey0,$in0 # input^=round[last]
1483 movdqu 0x20($inp),$in2
1484 pxor $rndkey0,$in1
1485 movdqu 0x30($inp),$in3
1486 pxor $rndkey0,$in2
1487 movdqu 0x40($inp),$in4
1488 pxor $rndkey0,$in3
1489 movdqu 0x50($inp),$in5
1490 pxor $rndkey0,$in4
1491 pxor $rndkey0,$in5
1492 aesenc $rndkey1,$inout0
1493 aesenc $rndkey1,$inout1
1494 aesenc $rndkey1,$inout2
1495 aesenc $rndkey1,$inout3
1496 aesenc $rndkey1,$inout4
1497 aesenc $rndkey1,$inout5
1498 aesenc $rndkey1,$inout6
1499 aesenc $rndkey1,$inout7
1500 movdqu 0x60($inp),$rndkey1 # borrow $rndkey1 for inp[6]
1501 lea 0x80($inp),$inp # $inp+=8*16
1502
1503 aesenclast $in0,$inout0 # $inN is inp[N]^round[last]
1504 pxor $rndkey0,$rndkey1 # borrowed $rndkey
1505 movdqu 0x70-0x80($inp),$in0
1506 aesenclast $in1,$inout1
1507 pxor $rndkey0,$in0
1508 movdqa 0x00(%rsp),$in1 # load next counter block
1509 aesenclast $in2,$inout2
1510 aesenclast $in3,$inout3
1511 movdqa 0x10(%rsp),$in2
1512 movdqa 0x20(%rsp),$in3
1513 aesenclast $in4,$inout4
1514 aesenclast $in5,$inout5
1515 movdqa 0x30(%rsp),$in4
1516 movdqa 0x40(%rsp),$in5
1517 aesenclast $rndkey1,$inout6
1518 movdqa 0x50(%rsp),$rndkey0
1519 $movkey 0x10-0x80($key),$rndkey1 # real 1st-round key
1520 aesenclast $in0,$inout7
1521
1522 movups $inout0,($out) # store 8 output blocks
1523 movdqa $in1,$inout0
1524 movups $inout1,0x10($out)
1525 movdqa $in2,$inout1
1526 movups $inout2,0x20($out)
1527 movdqa $in3,$inout2
1528 movups $inout3,0x30($out)
1529 movdqa $in4,$inout3
1530 movups $inout4,0x40($out)
1531 movdqa $in5,$inout4
1532 movups $inout5,0x50($out)
1533 movdqa $rndkey0,$inout5
1534 movups $inout6,0x60($out)
1535 movups $inout7,0x70($out)
1536 lea 0x80($out),$out # $out+=8*16
1537
1538 sub \$8,$len
1539 jnc .Lctr32_loop8 # loop if $len-=8 didn't borrow
1540
1541 add \$8,$len # restore real remaining $len
1542 jz .Lctr32_done # done if ($len==0)
1543 lea -0x80($key),$key
1544
1545 .Lctr32_tail:
1546 # note that at this point $inout0..5 are populated with
1547 # counter values xor-ed with 0-round key
1548 lea 16($key),$key
1549 cmp \$4,$len
1550 jb .Lctr32_loop3
1551 je .Lctr32_loop4
1552
1553 # if ($len>4) compute 7 E(counter)
1554 shl \$4,$rounds
1555 movdqa 0x60(%rsp),$inout6
1556 pxor $inout7,$inout7
1557
1558 $movkey 16($key),$rndkey0
1559 aesenc $rndkey1,$inout0
1560 aesenc $rndkey1,$inout1
1561 lea 32-16($key,$rounds),$key# prepare for .Lenc_loop8_enter
1562 neg %rax
1563 aesenc $rndkey1,$inout2
1564 add \$16,%rax # prepare for .Lenc_loop8_enter
1565 movups ($inp),$in0
1566 aesenc $rndkey1,$inout3
1567 aesenc $rndkey1,$inout4
1568 movups 0x10($inp),$in1 # pre-load input
1569 movups 0x20($inp),$in2
1570 aesenc $rndkey1,$inout5
1571 aesenc $rndkey1,$inout6
1572
1573 call .Lenc_loop8_enter
1574
1575 movdqu 0x30($inp),$in3
1576 pxor $in0,$inout0
1577 movdqu 0x40($inp),$in0
1578 pxor $in1,$inout1
1579 movdqu $inout0,($out) # store output
1580 pxor $in2,$inout2
1581 movdqu $inout1,0x10($out)
1582 pxor $in3,$inout3
1583 movdqu $inout2,0x20($out)
1584 pxor $in0,$inout4
1585 movdqu $inout3,0x30($out)
1586 movdqu $inout4,0x40($out)
1587 cmp \$6,$len
1588 jb .Lctr32_done # $len was 5, stop store
1589
1590 movups 0x50($inp),$in1
1591 xorps $in1,$inout5
1592 movups $inout5,0x50($out)
1593 je .Lctr32_done # $len was 6, stop store
1594
1595 movups 0x60($inp),$in2
1596 xorps $in2,$inout6
1597 movups $inout6,0x60($out)
1598 jmp .Lctr32_done # $len was 7, stop store
1599
1600 .align 32
1601 .Lctr32_loop4:
1602 aesenc $rndkey1,$inout0
1603 lea 16($key),$key
1604 dec $rounds
1605 aesenc $rndkey1,$inout1
1606 aesenc $rndkey1,$inout2
1607 aesenc $rndkey1,$inout3
1608 $movkey ($key),$rndkey1
1609 jnz .Lctr32_loop4
1610 aesenclast $rndkey1,$inout0
1611 aesenclast $rndkey1,$inout1
1612 movups ($inp),$in0 # load input
1613 movups 0x10($inp),$in1
1614 aesenclast $rndkey1,$inout2
1615 aesenclast $rndkey1,$inout3
1616 movups 0x20($inp),$in2
1617 movups 0x30($inp),$in3
1618
1619 xorps $in0,$inout0
1620 movups $inout0,($out) # store output
1621 xorps $in1,$inout1
1622 movups $inout1,0x10($out)
1623 pxor $in2,$inout2
1624 movdqu $inout2,0x20($out)
1625 pxor $in3,$inout3
1626 movdqu $inout3,0x30($out)
1627 jmp .Lctr32_done # $len was 4, stop store
1628
1629 .align 32
1630 .Lctr32_loop3:
1631 aesenc $rndkey1,$inout0
1632 lea 16($key),$key
1633 dec $rounds
1634 aesenc $rndkey1,$inout1
1635 aesenc $rndkey1,$inout2
1636 $movkey ($key),$rndkey1
1637 jnz .Lctr32_loop3
1638 aesenclast $rndkey1,$inout0
1639 aesenclast $rndkey1,$inout1
1640 aesenclast $rndkey1,$inout2
1641
1642 movups ($inp),$in0 # load input
1643 xorps $in0,$inout0
1644 movups $inout0,($out) # store output
1645 cmp \$2,$len
1646 jb .Lctr32_done # $len was 1, stop store
1647
1648 movups 0x10($inp),$in1
1649 xorps $in1,$inout1
1650 movups $inout1,0x10($out)
1651 je .Lctr32_done # $len was 2, stop store
1652
1653 movups 0x20($inp),$in2
1654 xorps $in2,$inout2
1655 movups $inout2,0x20($out) # $len was 3, stop store
1656
1657 .Lctr32_done:
1658 xorps %xmm0,%xmm0 # clear register bank
1659 xor $key0,$key0
1660 pxor %xmm1,%xmm1
1661 pxor %xmm2,%xmm2
1662 pxor %xmm3,%xmm3
1663 pxor %xmm4,%xmm4
1664 pxor %xmm5,%xmm5
1665 ___
1666 $code.=<<___ if (!$win64);
1667 pxor %xmm6,%xmm6
1668 pxor %xmm7,%xmm7
1669 movaps %xmm0,0x00(%rsp) # clear stack
1670 pxor %xmm8,%xmm8
1671 movaps %xmm0,0x10(%rsp)
1672 pxor %xmm9,%xmm9
1673 movaps %xmm0,0x20(%rsp)
1674 pxor %xmm10,%xmm10
1675 movaps %xmm0,0x30(%rsp)
1676 pxor %xmm11,%xmm11
1677 movaps %xmm0,0x40(%rsp)
1678 pxor %xmm12,%xmm12
1679 movaps %xmm0,0x50(%rsp)
1680 pxor %xmm13,%xmm13
1681 movaps %xmm0,0x60(%rsp)
1682 pxor %xmm14,%xmm14
1683 movaps %xmm0,0x70(%rsp)
1684 pxor %xmm15,%xmm15
1685 ___
1686 $code.=<<___ if ($win64);
1687 movaps -0xa0(%rbp),%xmm6
1688 movaps %xmm0,-0xa0(%rbp) # clear stack
1689 movaps -0x90(%rbp),%xmm7
1690 movaps %xmm0,-0x90(%rbp)
1691 movaps -0x80(%rbp),%xmm8
1692 movaps %xmm0,-0x80(%rbp)
1693 movaps -0x70(%rbp),%xmm9
1694 movaps %xmm0,-0x70(%rbp)
1695 movaps -0x60(%rbp),%xmm10
1696 movaps %xmm0,-0x60(%rbp)
1697 movaps -0x50(%rbp),%xmm11
1698 movaps %xmm0,-0x50(%rbp)
1699 movaps -0x40(%rbp),%xmm12
1700 movaps %xmm0,-0x40(%rbp)
1701 movaps -0x30(%rbp),%xmm13
1702 movaps %xmm0,-0x30(%rbp)
1703 movaps -0x20(%rbp),%xmm14
1704 movaps %xmm0,-0x20(%rbp)
1705 movaps -0x10(%rbp),%xmm15
1706 movaps %xmm0,-0x10(%rbp)
1707 movaps %xmm0,0x00(%rsp)
1708 movaps %xmm0,0x10(%rsp)
1709 movaps %xmm0,0x20(%rsp)
1710 movaps %xmm0,0x30(%rsp)
1711 movaps %xmm0,0x40(%rsp)
1712 movaps %xmm0,0x50(%rsp)
1713 movaps %xmm0,0x60(%rsp)
1714 movaps %xmm0,0x70(%rsp)
1715 ___
1716 $code.=<<___;
1717 lea (%rbp),%rsp
1718 pop %rbp
1719 .Lctr32_epilogue:
1720 ret
1721 .size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
1722 ___
1723 }
1724 \f
1725 ######################################################################
1726 # void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
1727 # const AES_KEY *key1, const AES_KEY *key2
1728 # const unsigned char iv[16]);
1729 #
1730 {
1731 my @tweak=map("%xmm$_",(10..15));
1732 my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
1733 my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
1734 my $frame_size = 0x70 + ($win64?160:0);
1735
1736 $code.=<<___;
1737 .globl aesni_xts_encrypt
1738 .type aesni_xts_encrypt,\@function,6
1739 .align 16
1740 aesni_xts_encrypt:
1741 lea (%rsp),%rax
1742 push %rbp
1743 sub \$$frame_size,%rsp
1744 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1745 ___
1746 $code.=<<___ if ($win64);
1747 movaps %xmm6,-0xa8(%rax) # offload everything
1748 movaps %xmm7,-0x98(%rax)
1749 movaps %xmm8,-0x88(%rax)
1750 movaps %xmm9,-0x78(%rax)
1751 movaps %xmm10,-0x68(%rax)
1752 movaps %xmm11,-0x58(%rax)
1753 movaps %xmm12,-0x48(%rax)
1754 movaps %xmm13,-0x38(%rax)
1755 movaps %xmm14,-0x28(%rax)
1756 movaps %xmm15,-0x18(%rax)
1757 .Lxts_enc_body:
1758 ___
1759 $code.=<<___;
1760 lea -8(%rax),%rbp
1761 movups ($ivp),$inout0 # load clear-text tweak
1762 mov 240(%r8),$rounds # key2->rounds
1763 mov 240($key),$rnds_ # key1->rounds
1764 ___
1765 # generate the tweak
1766 &aesni_generate1("enc",$key2,$rounds,$inout0);
1767 $code.=<<___;
1768 $movkey ($key),$rndkey0 # zero round key
1769 mov $key,$key_ # backup $key
1770 mov $rnds_,$rounds # backup $rounds
1771 shl \$4,$rnds_
1772 mov $len,$len_ # backup $len
1773 and \$-16,$len
1774
1775 $movkey 16($key,$rnds_),$rndkey1 # last round key
1776
1777 movdqa .Lxts_magic(%rip),$twmask
1778 movdqa $inout0,@tweak[5]
1779 pshufd \$0x5f,$inout0,$twres
1780 pxor $rndkey0,$rndkey1
1781 ___
1782 # alternative tweak calculation algorithm is based on suggestions
1783 # by Shay Gueron. psrad doesn't conflict with AES-NI instructions
1784 # and should help in the future...
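#
# Concretely, the loop below computes tweak*x in GF(2^128) for each of
# the next tweaks (a sketch, assuming .Lxts_magic -- defined later in
# this file -- holds the dwords 0x87,0,1,0): pshufd \$0x5f replicates
# the sign dwords of the current tweak, psrad \$31 turns them into
# all-ones/all-zeros masks, pand with .Lxts_magic keeps the matching
# carry constants (the x^7+x^2+x+1 reduction term for the low half, a
# plain carry into the high half), paddq doubles both 64-bit halves and
# pxor folds the carries back in. Each tweak is also pre-xor-ed with
# round[0], so the per-block input xor comes for free.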
1785 for ($i=0;$i<4;$i++) {
1786 $code.=<<___;
1787 movdqa $twres,$twtmp
1788 paddd $twres,$twres
1789 movdqa @tweak[5],@tweak[$i]
1790 psrad \$31,$twtmp # broadcast upper bits
1791 paddq @tweak[5],@tweak[5]
1792 pand $twmask,$twtmp
1793 pxor $rndkey0,@tweak[$i]
1794 pxor $twtmp,@tweak[5]
1795 ___
1796 }
1797 $code.=<<___;
1798 movdqa @tweak[5],@tweak[4]
1799 psrad \$31,$twres
1800 paddq @tweak[5],@tweak[5]
1801 pand $twmask,$twres
1802 pxor $rndkey0,@tweak[4]
1803 pxor $twres,@tweak[5]
1804 movaps $rndkey1,0x60(%rsp) # save round[0]^round[last]
1805
1806 sub \$16*6,$len
1807 jc .Lxts_enc_short # if $len-=6*16 borrowed
1808
1809 mov \$16+96,$rounds
1810 lea 32($key_,$rnds_),$key # end of key schedule
1811 sub %r10,%rax # twisted $rounds
1812 $movkey 16($key_),$rndkey1
1813 mov %rax,%r10 # backup twisted $rounds
1814 lea .Lxts_magic(%rip),%r8
1815 jmp .Lxts_enc_grandloop
1816
1817 .align 32
1818 .Lxts_enc_grandloop:
1819 movdqu `16*0`($inp),$inout0 # load input
1820 movdqa $rndkey0,$twmask
1821 movdqu `16*1`($inp),$inout1
1822 pxor @tweak[0],$inout0 # input^=tweak^round[0]
1823 movdqu `16*2`($inp),$inout2
1824 pxor @tweak[1],$inout1
1825 aesenc $rndkey1,$inout0
1826 movdqu `16*3`($inp),$inout3
1827 pxor @tweak[2],$inout2
1828 aesenc $rndkey1,$inout1
1829 movdqu `16*4`($inp),$inout4
1830 pxor @tweak[3],$inout3
1831 aesenc $rndkey1,$inout2
1832 movdqu `16*5`($inp),$inout5
1833 pxor @tweak[5],$twmask # round[0]^=tweak[5]
1834 movdqa 0x60(%rsp),$twres # load round[0]^round[last]
1835 pxor @tweak[4],$inout4
1836 aesenc $rndkey1,$inout3
1837 $movkey 32($key_),$rndkey0
1838 lea `16*6`($inp),$inp
1839 pxor $twmask,$inout5
1840
1841 pxor $twres,@tweak[0] # calculate tweaks^round[last]
1842 aesenc $rndkey1,$inout4
1843 pxor $twres,@tweak[1]
1844 movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^round[last]
1845 aesenc $rndkey1,$inout5
1846 $movkey 48($key_),$rndkey1
1847 pxor $twres,@tweak[2]
1848
1849 aesenc $rndkey0,$inout0
1850 pxor $twres,@tweak[3]
1851 movdqa @tweak[1],`16*1`(%rsp)
1852 aesenc $rndkey0,$inout1
1853 pxor $twres,@tweak[4]
1854 movdqa @tweak[2],`16*2`(%rsp)
1855 aesenc $rndkey0,$inout2
1856 aesenc $rndkey0,$inout3
1857 pxor $twres,$twmask
1858 movdqa @tweak[4],`16*4`(%rsp)
1859 aesenc $rndkey0,$inout4
1860 aesenc $rndkey0,$inout5
1861 $movkey 64($key_),$rndkey0
1862 movdqa $twmask,`16*5`(%rsp)
1863 pshufd \$0x5f,@tweak[5],$twres
1864 jmp .Lxts_enc_loop6
1865 .align 32
1866 .Lxts_enc_loop6:
1867 aesenc $rndkey1,$inout0
1868 aesenc $rndkey1,$inout1
1869 aesenc $rndkey1,$inout2
1870 aesenc $rndkey1,$inout3
1871 aesenc $rndkey1,$inout4
1872 aesenc $rndkey1,$inout5
1873 $movkey -64($key,%rax),$rndkey1
1874 add \$32,%rax
1875
1876 aesenc $rndkey0,$inout0
1877 aesenc $rndkey0,$inout1
1878 aesenc $rndkey0,$inout2
1879 aesenc $rndkey0,$inout3
1880 aesenc $rndkey0,$inout4
1881 aesenc $rndkey0,$inout5
1882 $movkey -80($key,%rax),$rndkey0
1883 jnz .Lxts_enc_loop6
1884
1885 movdqa (%r8),$twmask # start calculating next tweak
1886 movdqa $twres,$twtmp
1887 paddd $twres,$twres
1888 aesenc $rndkey1,$inout0
1889 paddq @tweak[5],@tweak[5]
1890 psrad \$31,$twtmp
1891 aesenc $rndkey1,$inout1
1892 pand $twmask,$twtmp
1893 $movkey ($key_),@tweak[0] # load round[0]
1894 aesenc $rndkey1,$inout2
1895 aesenc $rndkey1,$inout3
1896 aesenc $rndkey1,$inout4
1897 pxor $twtmp,@tweak[5]
1898 movaps @tweak[0],@tweak[1] # copy round[0]
1899 aesenc $rndkey1,$inout5
1900 $movkey -64($key),$rndkey1
1901
1902 movdqa $twres,$twtmp
1903 aesenc $rndkey0,$inout0
1904 paddd $twres,$twres
1905 pxor @tweak[5],@tweak[0]
1906 aesenc $rndkey0,$inout1
1907 psrad \$31,$twtmp
1908 paddq @tweak[5],@tweak[5]
1909 aesenc $rndkey0,$inout2
1910 aesenc $rndkey0,$inout3
1911 pand $twmask,$twtmp
1912 movaps @tweak[1],@tweak[2]
1913 aesenc $rndkey0,$inout4
1914 pxor $twtmp,@tweak[5]
1915 movdqa $twres,$twtmp
1916 aesenc $rndkey0,$inout5
1917 $movkey -48($key),$rndkey0
1918
1919 paddd $twres,$twres
1920 aesenc $rndkey1,$inout0
1921 pxor @tweak[5],@tweak[1]
1922 psrad \$31,$twtmp
1923 aesenc $rndkey1,$inout1
1924 paddq @tweak[5],@tweak[5]
1925 pand $twmask,$twtmp
1926 aesenc $rndkey1,$inout2
1927 aesenc $rndkey1,$inout3
1928 movdqa @tweak[3],`16*3`(%rsp)
1929 pxor $twtmp,@tweak[5]
1930 aesenc $rndkey1,$inout4
1931 movaps @tweak[2],@tweak[3]
1932 movdqa $twres,$twtmp
1933 aesenc $rndkey1,$inout5
1934 $movkey -32($key),$rndkey1
1935
1936 paddd $twres,$twres
1937 aesenc $rndkey0,$inout0
1938 pxor @tweak[5],@tweak[2]
1939 psrad \$31,$twtmp
1940 aesenc $rndkey0,$inout1
1941 paddq @tweak[5],@tweak[5]
1942 pand $twmask,$twtmp
1943 aesenc $rndkey0,$inout2
1944 aesenc $rndkey0,$inout3
1945 aesenc $rndkey0,$inout4
1946 pxor $twtmp,@tweak[5]
1947 movaps @tweak[3],@tweak[4]
1948 aesenc $rndkey0,$inout5
1949
1950 movdqa $twres,$rndkey0
1951 paddd $twres,$twres
1952 aesenc $rndkey1,$inout0
1953 pxor @tweak[5],@tweak[3]
1954 psrad \$31,$rndkey0
1955 aesenc $rndkey1,$inout1
1956 paddq @tweak[5],@tweak[5]
1957 pand $twmask,$rndkey0
1958 aesenc $rndkey1,$inout2
1959 aesenc $rndkey1,$inout3
1960 pxor $rndkey0,@tweak[5]
1961 $movkey ($key_),$rndkey0
1962 aesenc $rndkey1,$inout4
1963 aesenc $rndkey1,$inout5
1964 $movkey 16($key_),$rndkey1
1965
1966 pxor @tweak[5],@tweak[4]
1967 aesenclast `16*0`(%rsp),$inout0
1968 psrad \$31,$twres
1969 paddq @tweak[5],@tweak[5]
1970 aesenclast `16*1`(%rsp),$inout1
1971 aesenclast `16*2`(%rsp),$inout2
1972 pand $twmask,$twres
1973 mov %r10,%rax # restore $rounds
1974 aesenclast `16*3`(%rsp),$inout3
1975 aesenclast `16*4`(%rsp),$inout4
1976 aesenclast `16*5`(%rsp),$inout5
1977 pxor $twres,@tweak[5]
1978
1979 lea `16*6`($out),$out # $out+=6*16
1980 movups $inout0,`-16*6`($out) # store 6 output blocks
1981 movups $inout1,`-16*5`($out)
1982 movups $inout2,`-16*4`($out)
1983 movups $inout3,`-16*3`($out)
1984 movups $inout4,`-16*2`($out)
1985 movups $inout5,`-16*1`($out)
1986 sub \$16*6,$len
1987 jnc .Lxts_enc_grandloop # loop if $len-=6*16 didn't borrow
1988
1989 mov \$16+96,$rounds
1990 sub $rnds_,$rounds
1991 mov $key_,$key # restore $key
1992 shr \$4,$rounds # restore original value
1993
1994 .Lxts_enc_short:
1995 # at this point @tweak[0..5] are populated with tweak values
1996 mov $rounds,$rnds_ # backup $rounds
1997 pxor $rndkey0,@tweak[0]
1998 add \$16*6,$len # restore real remaining $len
1999 jz .Lxts_enc_done # done if ($len==0)
2000
2001 pxor $rndkey0,@tweak[1]
2002 cmp \$0x20,$len
2003 jb .Lxts_enc_one # $len is 1*16
2004 pxor $rndkey0,@tweak[2]
2005 je .Lxts_enc_two # $len is 2*16
2006
2007 pxor $rndkey0,@tweak[3]
2008 cmp \$0x40,$len
2009 jb .Lxts_enc_three # $len is 3*16
2010 pxor $rndkey0,@tweak[4]
2011 je .Lxts_enc_four # $len is 4*16
2012
2013 movdqu ($inp),$inout0 # $len is 5*16
2014 movdqu 16*1($inp),$inout1
2015 movdqu 16*2($inp),$inout2
2016 pxor @tweak[0],$inout0
2017 movdqu 16*3($inp),$inout3
2018 pxor @tweak[1],$inout1
2019 movdqu 16*4($inp),$inout4
2020 lea 16*5($inp),$inp # $inp+=5*16
2021 pxor @tweak[2],$inout2
2022 pxor @tweak[3],$inout3
2023 pxor @tweak[4],$inout4
2024 pxor $inout5,$inout5
2025
2026 call _aesni_encrypt6
2027
2028 xorps @tweak[0],$inout0
2029 movdqa @tweak[5],@tweak[0]
2030 xorps @tweak[1],$inout1
2031 xorps @tweak[2],$inout2
2032 movdqu $inout0,($out) # store 5 output blocks
2033 xorps @tweak[3],$inout3
2034 movdqu $inout1,16*1($out)
2035 xorps @tweak[4],$inout4
2036 movdqu $inout2,16*2($out)
2037 movdqu $inout3,16*3($out)
2038 movdqu $inout4,16*4($out)
2039 lea 16*5($out),$out # $out+=5*16
2040 jmp .Lxts_enc_done
2041
2042 .align 16
2043 .Lxts_enc_one:
2044 movups ($inp),$inout0
2045 lea 16*1($inp),$inp # inp+=1*16
2046 xorps @tweak[0],$inout0
2047 ___
2048 &aesni_generate1("enc",$key,$rounds);
2049 $code.=<<___;
2050 xorps @tweak[0],$inout0
2051 movdqa @tweak[1],@tweak[0]
2052 movups $inout0,($out) # store one output block
2053 lea 16*1($out),$out # $out+=1*16
2054 jmp .Lxts_enc_done
2055
2056 .align 16
2057 .Lxts_enc_two:
2058 movups ($inp),$inout0
2059 movups 16($inp),$inout1
2060 lea 32($inp),$inp # $inp+=2*16
2061 xorps @tweak[0],$inout0
2062 xorps @tweak[1],$inout1
2063
2064 call _aesni_encrypt2
2065
2066 xorps @tweak[0],$inout0
2067 movdqa @tweak[2],@tweak[0]
2068 xorps @tweak[1],$inout1
2069 movups $inout0,($out) # store 2 output blocks
2070 movups $inout1,16*1($out)
2071 lea 16*2($out),$out # $out+=2*16
2072 jmp .Lxts_enc_done
2073
2074 .align 16
2075 .Lxts_enc_three:
2076 movups ($inp),$inout0
2077 movups 16*1($inp),$inout1
2078 movups 16*2($inp),$inout2
2079 lea 16*3($inp),$inp # $inp+=3*16
2080 xorps @tweak[0],$inout0
2081 xorps @tweak[1],$inout1
2082 xorps @tweak[2],$inout2
2083
2084 call _aesni_encrypt3
2085
2086 xorps @tweak[0],$inout0
2087 movdqa @tweak[3],@tweak[0]
2088 xorps @tweak[1],$inout1
2089 xorps @tweak[2],$inout2
2090 movups $inout0,($out) # store 3 output blocks
2091 movups $inout1,16*1($out)
2092 movups $inout2,16*2($out)
2093 lea 16*3($out),$out # $out+=3*16
2094 jmp .Lxts_enc_done
2095
2096 .align 16
2097 .Lxts_enc_four:
2098 movups ($inp),$inout0
2099 movups 16*1($inp),$inout1
2100 movups 16*2($inp),$inout2
2101 xorps @tweak[0],$inout0
2102 movups 16*3($inp),$inout3
2103 lea 16*4($inp),$inp # $inp+=4*16
2104 xorps @tweak[1],$inout1
2105 xorps @tweak[2],$inout2
2106 xorps @tweak[3],$inout3
2107
2108 call _aesni_encrypt4
2109
2110 pxor @tweak[0],$inout0
2111 movdqa @tweak[4],@tweak[0]
2112 pxor @tweak[1],$inout1
2113 pxor @tweak[2],$inout2
2114 movdqu $inout0,($out) # store 4 output blocks
2115 pxor @tweak[3],$inout3
2116 movdqu $inout1,16*1($out)
2117 movdqu $inout2,16*2($out)
2118 movdqu $inout3,16*3($out)
2119 lea 16*4($out),$out # $out+=4*16
2120 jmp .Lxts_enc_done
2121
2122 .align 16
2123 .Lxts_enc_done:
2124 and \$15,$len_ # see if $len%16 is 0
2125 jz .Lxts_enc_ret
2126 mov $len_,$len
2127
2128 .Lxts_enc_steal:
2129 movzb ($inp),%eax # borrow $rounds ...
2130 movzb -16($out),%ecx # ... and $key
2131 lea 1($inp),$inp
2132 mov %al,-16($out)
2133 mov %cl,0($out)
2134 lea 1($out),$out
2135 sub \$1,$len
2136 jnz .Lxts_enc_steal
2137
2138 sub $len_,$out # rewind $out
2139 mov $key_,$key # restore $key
2140 mov $rnds_,$rounds # restore $rounds
2141
2142 movups -16($out),$inout0
2143 xorps @tweak[0],$inout0
2144 ___
2145 &aesni_generate1("enc",$key,$rounds);
2146 $code.=<<___;
2147 xorps @tweak[0],$inout0
2148 movups $inout0,-16($out)
2149
2150 .Lxts_enc_ret:
2151 xorps %xmm0,%xmm0 # clear register bank
2152 pxor %xmm1,%xmm1
2153 pxor %xmm2,%xmm2
2154 pxor %xmm3,%xmm3
2155 pxor %xmm4,%xmm4
2156 pxor %xmm5,%xmm5
2157 ___
2158 $code.=<<___ if (!$win64);
2159 pxor %xmm6,%xmm6
2160 pxor %xmm7,%xmm7
2161 movaps %xmm0,0x00(%rsp) # clear stack
2162 pxor %xmm8,%xmm8
2163 movaps %xmm0,0x10(%rsp)
2164 pxor %xmm9,%xmm9
2165 movaps %xmm0,0x20(%rsp)
2166 pxor %xmm10,%xmm10
2167 movaps %xmm0,0x30(%rsp)
2168 pxor %xmm11,%xmm11
2169 movaps %xmm0,0x40(%rsp)
2170 pxor %xmm12,%xmm12
2171 movaps %xmm0,0x50(%rsp)
2172 pxor %xmm13,%xmm13
2173 movaps %xmm0,0x60(%rsp)
2174 pxor %xmm14,%xmm14
2175 pxor %xmm15,%xmm15
2176 ___
2177 $code.=<<___ if ($win64);
2178 movaps -0xa0(%rbp),%xmm6
2179 movaps %xmm0,-0xa0(%rbp) # clear stack
2180 movaps -0x90(%rbp),%xmm7
2181 movaps %xmm0,-0x90(%rbp)
2182 movaps -0x80(%rbp),%xmm8
2183 movaps %xmm0,-0x80(%rbp)
2184 movaps -0x70(%rbp),%xmm9
2185 movaps %xmm0,-0x70(%rbp)
2186 movaps -0x60(%rbp),%xmm10
2187 movaps %xmm0,-0x60(%rbp)
2188 movaps -0x50(%rbp),%xmm11
2189 movaps %xmm0,-0x50(%rbp)
2190 movaps -0x40(%rbp),%xmm12
2191 movaps %xmm0,-0x40(%rbp)
2192 movaps -0x30(%rbp),%xmm13
2193 movaps %xmm0,-0x30(%rbp)
2194 movaps -0x20(%rbp),%xmm14
2195 movaps %xmm0,-0x20(%rbp)
2196 movaps -0x10(%rbp),%xmm15
2197 movaps %xmm0,-0x10(%rbp)
2198 movaps %xmm0,0x00(%rsp)
2199 movaps %xmm0,0x10(%rsp)
2200 movaps %xmm0,0x20(%rsp)
2201 movaps %xmm0,0x30(%rsp)
2202 movaps %xmm0,0x40(%rsp)
2203 movaps %xmm0,0x50(%rsp)
2204 movaps %xmm0,0x60(%rsp)
2205 ___
2206 $code.=<<___;
2207 lea (%rbp),%rsp
2208 pop %rbp
2209 .Lxts_enc_epilogue:
2210 ret
2211 .size aesni_xts_encrypt,.-aesni_xts_encrypt
2212 ___
2213
2214 $code.=<<___;
2215 .globl aesni_xts_decrypt
2216 .type aesni_xts_decrypt,\@function,6
2217 .align 16
2218 aesni_xts_decrypt:
2219 lea (%rsp),%rax
2220 push %rbp
2221 sub \$$frame_size,%rsp
2222 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
2223 ___
2224 $code.=<<___ if ($win64);
2225 movaps %xmm6,-0xa8(%rax) # offload everything
2226 movaps %xmm7,-0x98(%rax)
2227 movaps %xmm8,-0x88(%rax)
2228 movaps %xmm9,-0x78(%rax)
2229 movaps %xmm10,-0x68(%rax)
2230 movaps %xmm11,-0x58(%rax)
2231 movaps %xmm12,-0x48(%rax)
2232 movaps %xmm13,-0x38(%rax)
2233 movaps %xmm14,-0x28(%rax)
2234 movaps %xmm15,-0x18(%rax)
2235 .Lxts_dec_body:
2236 ___
2237 $code.=<<___;
2238 lea -8(%rax),%rbp
2239 movups ($ivp),$inout0 # load clear-text tweak
2240 mov 240($key2),$rounds # key2->rounds
2241 mov 240($key),$rnds_ # key1->rounds
2242 ___
2243 # generate the tweak
2244 &aesni_generate1("enc",$key2,$rounds,$inout0);
2245 $code.=<<___;
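# When the input is not a multiple of the block size, withhold the last
# complete ciphertext block from the bulk loops (len -= 16): with
# ciphertext stealing that block has to be decrypted with the tweak that
# follows it, while the recombined partial block uses the preceding tweak
# (see .Lxts_dec_done2 and .Lxts_dec_steal below).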
2246 xor %eax,%eax # if ($len%16) len-=16;
2247 test \$15,$len
2248 setnz %al
2249 shl \$4,%rax
2250 sub %rax,$len
2251
2252 $movkey ($key),$rndkey0 # zero round key
2253 mov $key,$key_ # backup $key
2254 mov $rnds_,$rounds # backup $rounds
2255 shl \$4,$rnds_
2256 mov $len,$len_ # backup $len
2257 and \$-16,$len
2258
2259 $movkey 16($key,$rnds_),$rndkey1 # last round key
2260
2261 movdqa .Lxts_magic(%rip),$twmask
2262 movdqa $inout0,@tweak[5]
2263 pshufd \$0x5f,$inout0,$twres
2264 pxor $rndkey0,$rndkey1
2265 ___
2266 for ($i=0;$i<4;$i++) {
2267 $code.=<<___;
2268 movdqa $twres,$twtmp
2269 paddd $twres,$twres
2270 movdqa @tweak[5],@tweak[$i]
2271 psrad \$31,$twtmp # broadcast upper bits
2272 paddq @tweak[5],@tweak[5]
2273 pand $twmask,$twtmp
2274 pxor $rndkey0,@tweak[$i]
2275 pxor $twtmp,@tweak[5]
2276 ___
2277 }
2278 $code.=<<___;
2279 movdqa @tweak[5],@tweak[4]
2280 psrad \$31,$twres
2281 paddq @tweak[5],@tweak[5]
2282 pand $twmask,$twres
2283 pxor $rndkey0,@tweak[4]
2284 pxor $twres,@tweak[5]
2285 movaps $rndkey1,0x60(%rsp) # save round[0]^round[last]
2286
2287 sub \$16*6,$len
2288 jc .Lxts_dec_short # if $len-=6*16 borrowed
2289
2290 mov \$16+96,$rounds
2291 lea 32($key_,$rnds_),$key # end of key schedule
2292 sub %r10,%rax # twisted $rounds
2293 $movkey 16($key_),$rndkey1
2294 mov %rax,%r10 # backup twisted $rounds
2295 lea .Lxts_magic(%rip),%r8
2296 jmp .Lxts_dec_grandloop
2297
2298 .align 32
2299 .Lxts_dec_grandloop:
2300 movdqu `16*0`($inp),$inout0 # load input
2301 movdqa $rndkey0,$twmask
2302 movdqu `16*1`($inp),$inout1
2303 pxor @tweak[0],$inout0 # input^=tweak^round[0]
2304 movdqu `16*2`($inp),$inout2
2305 pxor @tweak[1],$inout1
2306 aesdec $rndkey1,$inout0
2307 movdqu `16*3`($inp),$inout3
2308 pxor @tweak[2],$inout2
2309 aesdec $rndkey1,$inout1
2310 movdqu `16*4`($inp),$inout4
2311 pxor @tweak[3],$inout3
2312 aesdec $rndkey1,$inout2
2313 movdqu `16*5`($inp),$inout5
2314 pxor @tweak[5],$twmask # round[0]^=tweak[5]
2315 movdqa 0x60(%rsp),$twres # load round[0]^round[last]
2316 pxor @tweak[4],$inout4
2317 aesdec $rndkey1,$inout3
2318 $movkey 32($key_),$rndkey0
2319 lea `16*6`($inp),$inp
2320 pxor $twmask,$inout5
2321
2322 pxor $twres,@tweak[0] # calculate tweaks^round[last]
2323 aesdec $rndkey1,$inout4
2324 pxor $twres,@tweak[1]
2325 movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^round[last]
2326 aesdec $rndkey1,$inout5
2327 $movkey 48($key_),$rndkey1
2328 pxor $twres,@tweak[2]
2329
2330 aesdec $rndkey0,$inout0
2331 pxor $twres,@tweak[3]
2332 movdqa @tweak[1],`16*1`(%rsp)
2333 aesdec $rndkey0,$inout1
2334 pxor $twres,@tweak[4]
2335 movdqa @tweak[2],`16*2`(%rsp)
2336 aesdec $rndkey0,$inout2
2337 aesdec $rndkey0,$inout3
2338 pxor $twres,$twmask
2339 movdqa @tweak[4],`16*4`(%rsp)
2340 aesdec $rndkey0,$inout4
2341 aesdec $rndkey0,$inout5
2342 $movkey 64($key_),$rndkey0
2343 movdqa $twmask,`16*5`(%rsp)
2344 pshufd \$0x5f,@tweak[5],$twres
2345 jmp .Lxts_dec_loop6
2346 .align 32
2347 .Lxts_dec_loop6:
2348 aesdec $rndkey1,$inout0
2349 aesdec $rndkey1,$inout1
2350 aesdec $rndkey1,$inout2
2351 aesdec $rndkey1,$inout3
2352 aesdec $rndkey1,$inout4
2353 aesdec $rndkey1,$inout5
2354 $movkey -64($key,%rax),$rndkey1
2355 add \$32,%rax
2356
2357 aesdec $rndkey0,$inout0
2358 aesdec $rndkey0,$inout1
2359 aesdec $rndkey0,$inout2
2360 aesdec $rndkey0,$inout3
2361 aesdec $rndkey0,$inout4
2362 aesdec $rndkey0,$inout5
2363 $movkey -80($key,%rax),$rndkey0
2364 jnz .Lxts_dec_loop6
2365
2366 movdqa (%r8),$twmask # start calculating next tweak
2367 movdqa $twres,$twtmp
2368 paddd $twres,$twres
2369 aesdec $rndkey1,$inout0
2370 paddq @tweak[5],@tweak[5]
2371 psrad \$31,$twtmp
2372 aesdec $rndkey1,$inout1
2373 pand $twmask,$twtmp
2374 $movkey ($key_),@tweak[0] # load round[0]
2375 aesdec $rndkey1,$inout2
2376 aesdec $rndkey1,$inout3
2377 aesdec $rndkey1,$inout4
2378 pxor $twtmp,@tweak[5]
2379 movaps @tweak[0],@tweak[1] # copy round[0]
2380 aesdec $rndkey1,$inout5
2381 $movkey -64($key),$rndkey1
2382
2383 movdqa $twres,$twtmp
2384 aesdec $rndkey0,$inout0
2385 paddd $twres,$twres
2386 pxor @tweak[5],@tweak[0]
2387 aesdec $rndkey0,$inout1
2388 psrad \$31,$twtmp
2389 paddq @tweak[5],@tweak[5]
2390 aesdec $rndkey0,$inout2
2391 aesdec $rndkey0,$inout3
2392 pand $twmask,$twtmp
2393 movaps @tweak[1],@tweak[2]
2394 aesdec $rndkey0,$inout4
2395 pxor $twtmp,@tweak[5]
2396 movdqa $twres,$twtmp
2397 aesdec $rndkey0,$inout5
2398 $movkey -48($key),$rndkey0
2399
2400 paddd $twres,$twres
2401 aesdec $rndkey1,$inout0
2402 pxor @tweak[5],@tweak[1]
2403 psrad \$31,$twtmp
2404 aesdec $rndkey1,$inout1
2405 paddq @tweak[5],@tweak[5]
2406 pand $twmask,$twtmp
2407 aesdec $rndkey1,$inout2
2408 aesdec $rndkey1,$inout3
2409 movdqa @tweak[3],`16*3`(%rsp)
2410 pxor $twtmp,@tweak[5]
2411 aesdec $rndkey1,$inout4
2412 movaps @tweak[2],@tweak[3]
2413 movdqa $twres,$twtmp
2414 aesdec $rndkey1,$inout5
2415 $movkey -32($key),$rndkey1
2416
2417 paddd $twres,$twres
2418 aesdec $rndkey0,$inout0
2419 pxor @tweak[5],@tweak[2]
2420 psrad \$31,$twtmp
2421 aesdec $rndkey0,$inout1
2422 paddq @tweak[5],@tweak[5]
2423 pand $twmask,$twtmp
2424 aesdec $rndkey0,$inout2
2425 aesdec $rndkey0,$inout3
2426 aesdec $rndkey0,$inout4
2427 pxor $twtmp,@tweak[5]
2428 movaps @tweak[3],@tweak[4]
2429 aesdec $rndkey0,$inout5
2430
2431 movdqa $twres,$rndkey0
2432 paddd $twres,$twres
2433 aesdec $rndkey1,$inout0
2434 pxor @tweak[5],@tweak[3]
2435 psrad \$31,$rndkey0
2436 aesdec $rndkey1,$inout1
2437 paddq @tweak[5],@tweak[5]
2438 pand $twmask,$rndkey0
2439 aesdec $rndkey1,$inout2
2440 aesdec $rndkey1,$inout3
2441 pxor $rndkey0,@tweak[5]
2442 $movkey ($key_),$rndkey0
2443 aesdec $rndkey1,$inout4
2444 aesdec $rndkey1,$inout5
2445 $movkey 16($key_),$rndkey1
2446
2447 pxor @tweak[5],@tweak[4]
2448 aesdeclast `16*0`(%rsp),$inout0
2449 psrad \$31,$twres
2450 paddq @tweak[5],@tweak[5]
2451 aesdeclast `16*1`(%rsp),$inout1
2452 aesdeclast `16*2`(%rsp),$inout2
2453 pand $twmask,$twres
2454 mov %r10,%rax # restore $rounds
2455 aesdeclast `16*3`(%rsp),$inout3
2456 aesdeclast `16*4`(%rsp),$inout4
2457 aesdeclast `16*5`(%rsp),$inout5
2458 pxor $twres,@tweak[5]
2459
2460 lea `16*6`($out),$out # $out+=6*16
2461 movups $inout0,`-16*6`($out) # store 6 output blocks
2462 movups $inout1,`-16*5`($out)
2463 movups $inout2,`-16*4`($out)
2464 movups $inout3,`-16*3`($out)
2465 movups $inout4,`-16*2`($out)
2466 movups $inout5,`-16*1`($out)
2467 sub \$16*6,$len
2468 jnc .Lxts_dec_grandloop # loop if $len-=6*16 didn't borrow
2469
2470 mov \$16+96,$rounds
2471 sub $rnds_,$rounds
2472 mov $key_,$key # restore $key
2473 shr \$4,$rounds # restore original value
2474
2475 .Lxts_dec_short:
2476 # at this point @tweak[0..5] are populated with tweak values
2477 mov $rounds,$rnds_ # backup $rounds
2478 pxor $rndkey0,@tweak[0]
2479 pxor $rndkey0,@tweak[1]
2480 add \$16*6,$len # restore real remaining $len
2481 jz .Lxts_dec_done # done if ($len==0)
2482
2483 pxor $rndkey0,@tweak[2]
2484 cmp \$0x20,$len
2485 jb .Lxts_dec_one # $len is 1*16
2486 pxor $rndkey0,@tweak[3]
2487 je .Lxts_dec_two # $len is 2*16
2488
2489 pxor $rndkey0,@tweak[4]
2490 cmp \$0x40,$len
2491 jb .Lxts_dec_three # $len is 3*16
2492 je .Lxts_dec_four # $len is 4*16
2493
2494 movdqu ($inp),$inout0 # $len is 5*16
2495 movdqu 16*1($inp),$inout1
2496 movdqu 16*2($inp),$inout2
2497 pxor @tweak[0],$inout0
2498 movdqu 16*3($inp),$inout3
2499 pxor @tweak[1],$inout1
2500 movdqu 16*4($inp),$inout4
2501 lea 16*5($inp),$inp # $inp+=5*16
2502 pxor @tweak[2],$inout2
2503 pxor @tweak[3],$inout3
2504 pxor @tweak[4],$inout4
2505
2506 call _aesni_decrypt6
2507
2508 xorps @tweak[0],$inout0
2509 xorps @tweak[1],$inout1
2510 xorps @tweak[2],$inout2
2511 movdqu $inout0,($out) # store 5 output blocks
2512 xorps @tweak[3],$inout3
2513 movdqu $inout1,16*1($out)
2514 xorps @tweak[4],$inout4
2515 movdqu $inout2,16*2($out)
2516 pxor $twtmp,$twtmp
2517 movdqu $inout3,16*3($out)
2518 pcmpgtd @tweak[5],$twtmp
2519 movdqu $inout4,16*4($out)
2520 lea 16*5($out),$out # $out+=5*16
2521 pshufd \$0x13,$twtmp,@tweak[1] # $twres
2522 and \$15,$len_
2523 jz .Lxts_dec_ret
2524
2525 movdqa @tweak[5],@tweak[0]
2526 paddq @tweak[5],@tweak[5] # psllq 1,$tweak
2527 pand $twmask,@tweak[1] # isolate carry and residue
2528 pxor @tweak[5],@tweak[1]
2529 jmp .Lxts_dec_done2
2530
2531 .align 16
2532 .Lxts_dec_one:
2533 movups ($inp),$inout0
2534 lea 16*1($inp),$inp # $inp+=1*16
2535 xorps @tweak[0],$inout0
2536 ___
2537 &aesni_generate1("dec",$key,$rounds);
2538 $code.=<<___;
2539 xorps @tweak[0],$inout0
2540 movdqa @tweak[1],@tweak[0]
2541 movups $inout0,($out) # store one output block
2542 movdqa @tweak[2],@tweak[1]
2543 lea 16*1($out),$out # $out+=1*16
2544 jmp .Lxts_dec_done
2545
2546 .align 16
2547 .Lxts_dec_two:
2548 movups ($inp),$inout0
2549 movups 16($inp),$inout1
2550 lea 32($inp),$inp # $inp+=2*16
2551 xorps @tweak[0],$inout0
2552 xorps @tweak[1],$inout1
2553
2554 call _aesni_decrypt2
2555
2556 xorps @tweak[0],$inout0
2557 movdqa @tweak[2],@tweak[0]
2558 xorps @tweak[1],$inout1
2559 movdqa @tweak[3],@tweak[1]
2560 movups $inout0,($out) # store 2 output blocks
2561 movups $inout1,16*1($out)
2562 lea 16*2($out),$out # $out+=2*16
2563 jmp .Lxts_dec_done
2564
2565 .align 16
2566 .Lxts_dec_three:
2567 movups ($inp),$inout0
2568 movups 16*1($inp),$inout1
2569 movups 16*2($inp),$inout2
2570 lea 16*3($inp),$inp # $inp+=3*16
2571 xorps @tweak[0],$inout0
2572 xorps @tweak[1],$inout1
2573 xorps @tweak[2],$inout2
2574
2575 call _aesni_decrypt3
2576
2577 xorps @tweak[0],$inout0
2578 movdqa @tweak[3],@tweak[0]
2579 xorps @tweak[1],$inout1
2580 movdqa @tweak[4],@tweak[1]
2581 xorps @tweak[2],$inout2
2582 movups $inout0,($out) # store 3 output blocks
2583 movups $inout1,16*1($out)
2584 movups $inout2,16*2($out)
2585 lea 16*3($out),$out # $out+=3*16
2586 jmp .Lxts_dec_done
2587
2588 .align 16
2589 .Lxts_dec_four:
2590 movups ($inp),$inout0
2591 movups 16*1($inp),$inout1
2592 movups 16*2($inp),$inout2
2593 xorps @tweak[0],$inout0
2594 movups 16*3($inp),$inout3
2595 lea 16*4($inp),$inp # $inp+=4*16
2596 xorps @tweak[1],$inout1
2597 xorps @tweak[2],$inout2
2598 xorps @tweak[3],$inout3
2599
2600 call _aesni_decrypt4
2601
2602 pxor @tweak[0],$inout0
2603 movdqa @tweak[4],@tweak[0]
2604 pxor @tweak[1],$inout1
2605 movdqa @tweak[5],@tweak[1]
2606 pxor @tweak[2],$inout2
2607 movdqu $inout0,($out) # store 4 output blocks
2608 pxor @tweak[3],$inout3
2609 movdqu $inout1,16*1($out)
2610 movdqu $inout2,16*2($out)
2611 movdqu $inout3,16*3($out)
2612 lea 16*4($out),$out # $out+=4*16
2613 jmp .Lxts_dec_done
2614
2615 .align 16
2616 .Lxts_dec_done:
2617 and \$15,$len_ # see if $len%16 is 0
2618 jz .Lxts_dec_ret
2619 .Lxts_dec_done2:
2620 mov $len_,$len
2621 mov $key_,$key # restore $key
2622 mov $rnds_,$rounds # restore $rounds
2623
2624 movups ($inp),$inout0
2625 xorps @tweak[1],$inout0
2626 ___
2627 &aesni_generate1("dec",$key,$rounds);
2628 $code.=<<___;
2629 xorps @tweak[1],$inout0
2630 movups $inout0,($out)
2631
2632 .Lxts_dec_steal:
2633 movzb 16($inp),%eax # borrow $rounds ...
2634 movzb ($out),%ecx # ... and $key
2635 lea 1($inp),$inp
2636 mov %al,($out)
2637 mov %cl,16($out)
2638 lea 1($out),$out
2639 sub \$1,$len
2640 jnz .Lxts_dec_steal
2641
2642 sub $len_,$out # rewind $out
2643 mov $key_,$key # restore $key
2644 mov $rnds_,$rounds # restore $rounds
2645
2646 movups ($out),$inout0
2647 xorps @tweak[0],$inout0
2648 ___
2649 &aesni_generate1("dec",$key,$rounds);
2650 $code.=<<___;
2651 xorps @tweak[0],$inout0
2652 movups $inout0,($out)
2653
2654 .Lxts_dec_ret:
2655 xorps %xmm0,%xmm0 # clear register bank
2656 pxor %xmm1,%xmm1
2657 pxor %xmm2,%xmm2
2658 pxor %xmm3,%xmm3
2659 pxor %xmm4,%xmm4
2660 pxor %xmm5,%xmm5
2661 ___
2662 $code.=<<___ if (!$win64);
2663 pxor %xmm6,%xmm6
2664 pxor %xmm7,%xmm7
2665 movaps %xmm0,0x00(%rsp) # clear stack
2666 pxor %xmm8,%xmm8
2667 movaps %xmm0,0x10(%rsp)
2668 pxor %xmm9,%xmm9
2669 movaps %xmm0,0x20(%rsp)
2670 pxor %xmm10,%xmm10
2671 movaps %xmm0,0x30(%rsp)
2672 pxor %xmm11,%xmm11
2673 movaps %xmm0,0x40(%rsp)
2674 pxor %xmm12,%xmm12
2675 movaps %xmm0,0x50(%rsp)
2676 pxor %xmm13,%xmm13
2677 movaps %xmm0,0x60(%rsp)
2678 pxor %xmm14,%xmm14
2679 pxor %xmm15,%xmm15
2680 ___
2681 $code.=<<___ if ($win64);
2682 movaps -0xa0(%rbp),%xmm6
2683 movaps %xmm0,-0xa0(%rbp) # clear stack
2684 movaps -0x90(%rbp),%xmm7
2685 movaps %xmm0,-0x90(%rbp)
2686 movaps -0x80(%rbp),%xmm8
2687 movaps %xmm0,-0x80(%rbp)
2688 movaps -0x70(%rbp),%xmm9
2689 movaps %xmm0,-0x70(%rbp)
2690 movaps -0x60(%rbp),%xmm10
2691 movaps %xmm0,-0x60(%rbp)
2692 movaps -0x50(%rbp),%xmm11
2693 movaps %xmm0,-0x50(%rbp)
2694 movaps -0x40(%rbp),%xmm12
2695 movaps %xmm0,-0x40(%rbp)
2696 movaps -0x30(%rbp),%xmm13
2697 movaps %xmm0,-0x30(%rbp)
2698 movaps -0x20(%rbp),%xmm14
2699 movaps %xmm0,-0x20(%rbp)
2700 movaps -0x10(%rbp),%xmm15
2701 movaps %xmm0,-0x10(%rbp)
2702 movaps %xmm0,0x00(%rsp)
2703 movaps %xmm0,0x10(%rsp)
2704 movaps %xmm0,0x20(%rsp)
2705 movaps %xmm0,0x30(%rsp)
2706 movaps %xmm0,0x40(%rsp)
2707 movaps %xmm0,0x50(%rsp)
2708 movaps %xmm0,0x60(%rsp)
2709 ___
2710 $code.=<<___;
2711 lea (%rbp),%rsp
2712 pop %rbp
2713 .Lxts_dec_epilogue:
2714 ret
2715 .size aesni_xts_decrypt,.-aesni_xts_decrypt
2716 ___
2717 }
2718 \f
2719 ######################################################################
2720 # void aesni_ocb_[en|de]crypt(const char *inp, char *out, size_t blocks,
2721 # const AES_KEY *key, unsigned int start_block_num,
2722 # unsigned char offset_i[16], const unsigned char L_[][16],
2723 # unsigned char checksum[16]);
2724 #
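# For orientation, a rough sketch of the per-block OCB processing that the
# subroutines below implement (illustrative pseudo-code only):
#
#	Offset_i  = Offset_{i-1} ^ L_[ntz(i)]		# i = start_block_num,...
#	C_i       = Offset_i ^ AES_encrypt(P_i ^ Offset_i, key)
#	checksum ^= P_i					# on decrypt the checksum is
#							# accumulated over the output
#
# The bsf instructions compute ntz(i) and shl $4 turns it into a byte
# offset into the caller-supplied L_[] table; the offsets are kept
# pre-xored with round[0] and round[last] so that the whitening xors fold
# into the first input xor and the final aesenclast.
#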
2725 {
2726 my @offset=map("%xmm$_",(10..15));
2727 my ($checksum,$rndkey0l)=("%xmm8","%xmm9");
2728 my ($block_num,$offset_p)=("%r8","%r9"); # 5th and 6th arguments
2729 my ($L_p,$checksum_p) = ("%rbx","%rbp");
2730 my ($i1,$i3,$i5) = ("%r12","%r13","%r14");
2731 my $seventh_arg = $win64 ? 56 : 8;
2732 my $blocks = $len;
2733
2734 $code.=<<___;
2735 .globl aesni_ocb_encrypt
2736 .type aesni_ocb_encrypt,\@function,6
2737 .align 32
2738 aesni_ocb_encrypt:
2739 lea (%rsp),%rax
2740 push %rbx
2741 push %rbp
2742 push %r12
2743 push %r13
2744 push %r14
2745 ___
2746 $code.=<<___ if ($win64);
2747 lea -0xa0(%rsp),%rsp
2748 movaps %xmm6,0x00(%rsp) # offload everything
2749 movaps %xmm7,0x10(%rsp)
2750 movaps %xmm8,0x20(%rsp)
2751 movaps %xmm9,0x30(%rsp)
2752 movaps %xmm10,0x40(%rsp)
2753 movaps %xmm11,0x50(%rsp)
2754 movaps %xmm12,0x60(%rsp)
2755 movaps %xmm13,0x70(%rsp)
2756 movaps %xmm14,0x80(%rsp)
2757 movaps %xmm15,0x90(%rsp)
2758 .Locb_enc_body:
2759 ___
2760 $code.=<<___;
2761 mov $seventh_arg(%rax),$L_p # 7th argument
2762 mov $seventh_arg+8(%rax),$checksum_p # 8th argument
2763
2764 mov 240($key),$rnds_
2765 mov $key,$key_
2766 shl \$4,$rnds_
2767 $movkey ($key),$rndkey0l # round[0]
2768 $movkey 16($key,$rnds_),$rndkey1 # round[last]
2769
2770 movdqu ($offset_p),@offset[5] # load last offset_i
2771 pxor $rndkey1,$rndkey0l # round[0] ^ round[last]
2772 pxor $rndkey1,@offset[5] # offset_i ^ round[last]
2773
2774 mov \$16+32,$rounds
2775 lea 32($key_,$rnds_),$key
2776 $movkey 16($key_),$rndkey1 # round[1]
2777 sub %r10,%rax # twisted $rounds
2778 mov %rax,%r10 # backup twisted $rounds
2779
2780 movdqu ($L_p),@offset[0] # L_0 for all odd-numbered blocks
2781 movdqu ($checksum_p),$checksum # load checksum
2782
2783 test \$1,$block_num # is first block number odd?
2784 jnz .Locb_enc_odd
2785
2786 bsf $block_num,$i1
2787 add \$1,$block_num
2788 shl \$4,$i1
2789 movdqu ($L_p,$i1),$inout5 # borrow
2790 movdqu ($inp),$inout0
2791 lea 16($inp),$inp
2792
2793 call __ocb_encrypt1
2794
2795 movdqa $inout5,@offset[5]
2796 movups $inout0,($out)
2797 lea 16($out),$out
2798 sub \$1,$blocks
2799 jz .Locb_enc_done
2800
2801 .Locb_enc_odd:
2802 lea 1($block_num),$i1 # even-numbered blocks
2803 lea 3($block_num),$i3
2804 lea 5($block_num),$i5
2805 lea 6($block_num),$block_num
2806 bsf $i1,$i1 # ntz(block)
2807 bsf $i3,$i3
2808 bsf $i5,$i5
2809 shl \$4,$i1 # ntz(block) -> table offset
2810 shl \$4,$i3
2811 shl \$4,$i5
2812
2813 sub \$6,$blocks
2814 jc .Locb_enc_short
2815 jmp .Locb_enc_grandloop
2816
2817 .align 32
2818 .Locb_enc_grandloop:
2819 movdqu `16*0`($inp),$inout0 # load input
2820 movdqu `16*1`($inp),$inout1
2821 movdqu `16*2`($inp),$inout2
2822 movdqu `16*3`($inp),$inout3
2823 movdqu `16*4`($inp),$inout4
2824 movdqu `16*5`($inp),$inout5
2825 lea `16*6`($inp),$inp
2826
2827 call __ocb_encrypt6
2828
2829 movups $inout0,`16*0`($out) # store output
2830 movups $inout1,`16*1`($out)
2831 movups $inout2,`16*2`($out)
2832 movups $inout3,`16*3`($out)
2833 movups $inout4,`16*4`($out)
2834 movups $inout5,`16*5`($out)
2835 lea `16*6`($out),$out
2836 sub \$6,$blocks
2837 jnc .Locb_enc_grandloop
2838
2839 .Locb_enc_short:
2840 add \$6,$blocks
2841 jz .Locb_enc_done
2842
2843 movdqu `16*0`($inp),$inout0
2844 cmp \$2,$blocks
2845 jb .Locb_enc_one
2846 movdqu `16*1`($inp),$inout1
2847 je .Locb_enc_two
2848
2849 movdqu `16*2`($inp),$inout2
2850 cmp \$4,$blocks
2851 jb .Locb_enc_three
2852 movdqu `16*3`($inp),$inout3
2853 je .Locb_enc_four
2854
2855 movdqu `16*4`($inp),$inout4
2856 pxor $inout5,$inout5
2857
2858 call __ocb_encrypt6
2859
2860 movdqa @offset[4],@offset[5]
2861 movups $inout0,`16*0`($out)
2862 movups $inout1,`16*1`($out)
2863 movups $inout2,`16*2`($out)
2864 movups $inout3,`16*3`($out)
2865 movups $inout4,`16*4`($out)
2866
2867 jmp .Locb_enc_done
2868
2869 .align 16
2870 .Locb_enc_one:
2871 movdqa @offset[0],$inout5 # borrow
2872
2873 call __ocb_encrypt1
2874
2875 movdqa $inout5,@offset[5]
2876 movups $inout0,`16*0`($out)
2877 jmp .Locb_enc_done
2878
2879 .align 16
2880 .Locb_enc_two:
2881 pxor $inout2,$inout2
2882 pxor $inout3,$inout3
2883
2884 call __ocb_encrypt4
2885
2886 movdqa @offset[1],@offset[5]
2887 movups $inout0,`16*0`($out)
2888 movups $inout1,`16*1`($out)
2889
2890 jmp .Locb_enc_done
2891
2892 .align 16
2893 .Locb_enc_three:
2894 pxor $inout3,$inout3
2895
2896 call __ocb_encrypt4
2897
2898 movdqa @offset[2],@offset[5]
2899 movups $inout0,`16*0`($out)
2900 movups $inout1,`16*1`($out)
2901 movups $inout2,`16*2`($out)
2902
2903 jmp .Locb_enc_done
2904
2905 .align 16
2906 .Locb_enc_four:
2907 call __ocb_encrypt4
2908
2909 movdqa @offset[3],@offset[5]
2910 movups $inout0,`16*0`($out)
2911 movups $inout1,`16*1`($out)
2912 movups $inout2,`16*2`($out)
2913 movups $inout3,`16*3`($out)
2914
2915 .Locb_enc_done:
2916 pxor $rndkey0,@offset[5] # "remove" round[last]
2917 movdqu $checksum,($checksum_p) # store checksum
2918 movdqu @offset[5],($offset_p) # store last offset_i
2919
2920 xorps %xmm0,%xmm0 # clear register bank
2921 pxor %xmm1,%xmm1
2922 pxor %xmm2,%xmm2
2923 pxor %xmm3,%xmm3
2924 pxor %xmm4,%xmm4
2925 pxor %xmm5,%xmm5
2926 ___
2927 $code.=<<___ if (!$win64);
2928 pxor %xmm6,%xmm6
2929 pxor %xmm7,%xmm7
2930 pxor %xmm8,%xmm8
2931 pxor %xmm9,%xmm9
2932 pxor %xmm10,%xmm10
2933 pxor %xmm11,%xmm11
2934 pxor %xmm12,%xmm12
2935 pxor %xmm13,%xmm13
2936 pxor %xmm14,%xmm14
2937 pxor %xmm15,%xmm15
2938 ___
2939 $code.=<<___ if ($win64);
2940 movaps 0x00(%rsp),%xmm6
2941 movaps %xmm0,0x00(%rsp) # clear stack
2942 movaps 0x10(%rsp),%xmm7
2943 movaps %xmm0,0x10(%rsp)
2944 movaps 0x20(%rsp),%xmm8
2945 movaps %xmm0,0x20(%rsp)
2946 movaps 0x30(%rsp),%xmm9
2947 movaps %xmm0,0x30(%rsp)
2948 movaps 0x40(%rsp),%xmm10
2949 movaps %xmm0,0x40(%rsp)
2950 movaps 0x50(%rsp),%xmm11
2951 movaps %xmm0,0x50(%rsp)
2952 movaps 0x60(%rsp),%xmm12
2953 movaps %xmm0,0x60(%rsp)
2954 movaps 0x70(%rsp),%xmm13
2955 movaps %xmm0,0x70(%rsp)
2956 movaps 0x80(%rsp),%xmm14
2957 movaps %xmm0,0x80(%rsp)
2958 movaps 0x90(%rsp),%xmm15
2959 movaps %xmm0,0x90(%rsp)
2960 lea 0xa0+0x28(%rsp),%rax
2961 .Locb_enc_pop:
2962 lea 0xa0(%rsp),%rsp
2963 ___
2964 $code.=<<___;
2965 pop %r14
2966 pop %r13
2967 pop %r12
2968 pop %rbp
2969 pop %rbx
2970 .Locb_enc_epilogue:
2971 ret
2972 .size aesni_ocb_encrypt,.-aesni_ocb_encrypt
2973
2974 .type __ocb_encrypt6,\@abi-omnipotent
2975 .align 32
2976 __ocb_encrypt6:
2977 pxor $rndkey0l,@offset[5] # offset_i ^ round[0]
2978 movdqu ($L_p,$i1),@offset[1]
2979 movdqa @offset[0],@offset[2]
2980 movdqu ($L_p,$i3),@offset[3]
2981 movdqa @offset[0],@offset[4]
2982 pxor @offset[5],@offset[0]
2983 movdqu ($L_p,$i5),@offset[5]
2984 pxor @offset[0],@offset[1]
2985 pxor $inout0,$checksum # accumulate checksum
2986 pxor @offset[0],$inout0 # input ^ round[0] ^ offset_i
2987 pxor @offset[1],@offset[2]
2988 pxor $inout1,$checksum
2989 pxor @offset[1],$inout1
2990 pxor @offset[2],@offset[3]
2991 pxor $inout2,$checksum
2992 pxor @offset[2],$inout2
2993 pxor @offset[3],@offset[4]
2994 pxor $inout3,$checksum
2995 pxor @offset[3],$inout3
2996 pxor @offset[4],@offset[5]
2997 pxor $inout4,$checksum
2998 pxor @offset[4],$inout4
2999 pxor $inout5,$checksum
3000 pxor @offset[5],$inout5
3001 $movkey 32($key_),$rndkey0
3002
3003 lea 1($block_num),$i1 # even-numbered blocks
3004 lea 3($block_num),$i3
3005 lea 5($block_num),$i5
3006 add \$6,$block_num
3007 pxor $rndkey0l,@offset[0] # offset_i ^ round[last]
3008 bsf $i1,$i1 # ntz(block)
3009 bsf $i3,$i3
3010 bsf $i5,$i5
3011
3012 aesenc $rndkey1,$inout0
3013 aesenc $rndkey1,$inout1
3014 aesenc $rndkey1,$inout2
3015 aesenc $rndkey1,$inout3
3016 pxor $rndkey0l,@offset[1]
3017 pxor $rndkey0l,@offset[2]
3018 aesenc $rndkey1,$inout4
3019 pxor $rndkey0l,@offset[3]
3020 pxor $rndkey0l,@offset[4]
3021 aesenc $rndkey1,$inout5
3022 $movkey 48($key_),$rndkey1
3023 pxor $rndkey0l,@offset[5]
3024
3025 aesenc $rndkey0,$inout0
3026 aesenc $rndkey0,$inout1
3027 aesenc $rndkey0,$inout2
3028 aesenc $rndkey0,$inout3
3029 aesenc $rndkey0,$inout4
3030 aesenc $rndkey0,$inout5
3031 $movkey 64($key_),$rndkey0
3032 shl \$4,$i1 # ntz(block) -> table offset
3033 shl \$4,$i3
3034 jmp .Locb_enc_loop6
3035
3036 .align 32
3037 .Locb_enc_loop6:
3038 aesenc $rndkey1,$inout0
3039 aesenc $rndkey1,$inout1
3040 aesenc $rndkey1,$inout2
3041 aesenc $rndkey1,$inout3
3042 aesenc $rndkey1,$inout4
3043 aesenc $rndkey1,$inout5
3044 $movkey ($key,%rax),$rndkey1
3045 add \$32,%rax
3046
3047 aesenc $rndkey0,$inout0
3048 aesenc $rndkey0,$inout1
3049 aesenc $rndkey0,$inout2
3050 aesenc $rndkey0,$inout3
3051 aesenc $rndkey0,$inout4
3052 aesenc $rndkey0,$inout5
3053 $movkey -16($key,%rax),$rndkey0
3054 jnz .Locb_enc_loop6
3055
3056 aesenc $rndkey1,$inout0
3057 aesenc $rndkey1,$inout1
3058 aesenc $rndkey1,$inout2
3059 aesenc $rndkey1,$inout3
3060 aesenc $rndkey1,$inout4
3061 aesenc $rndkey1,$inout5
3062 $movkey 16($key_),$rndkey1
3063 shl \$4,$i5
3064
3065 aesenclast @offset[0],$inout0
3066 movdqu ($L_p),@offset[0] # L_0 for all odd-numbered blocks
3067 mov %r10,%rax # restore twisted rounds
3068 aesenclast @offset[1],$inout1
3069 aesenclast @offset[2],$inout2
3070 aesenclast @offset[3],$inout3
3071 aesenclast @offset[4],$inout4
3072 aesenclast @offset[5],$inout5
3073 ret
3074 .size __ocb_encrypt6,.-__ocb_encrypt6
3075
3076 .type __ocb_encrypt4,\@abi-omnipotent
3077 .align 32
3078 __ocb_encrypt4:
3079 pxor $rndkey0l,@offset[5] # offset_i ^ round[0]
3080 movdqu ($L_p,$i1),@offset[1]
3081 movdqa @offset[0],@offset[2]
3082 movdqu ($L_p,$i3),@offset[3]
3083 pxor @offset[5],@offset[0]
3084 pxor @offset[0],@offset[1]
3085 pxor $inout0,$checksum # accumulate checksum
3086 pxor @offset[0],$inout0 # input ^ round[0] ^ offset_i
3087 pxor @offset[1],@offset[2]
3088 pxor $inout1,$checksum
3089 pxor @offset[1],$inout1
3090 pxor @offset[2],@offset[3]
3091 pxor $inout2,$checksum
3092 pxor @offset[2],$inout2
3093 pxor $inout3,$checksum
3094 pxor @offset[3],$inout3
3095 $movkey 32($key_),$rndkey0
3096
3097 pxor $rndkey0l,@offset[0] # offset_i ^ round[last]
3098 pxor $rndkey0l,@offset[1]
3099 pxor $rndkey0l,@offset[2]
3100 pxor $rndkey0l,@offset[3]
3101
3102 aesenc $rndkey1,$inout0
3103 aesenc $rndkey1,$inout1
3104 aesenc $rndkey1,$inout2
3105 aesenc $rndkey1,$inout3
3106 $movkey 48($key_),$rndkey1
3107
3108 aesenc $rndkey0,$inout0
3109 aesenc $rndkey0,$inout1
3110 aesenc $rndkey0,$inout2
3111 aesenc $rndkey0,$inout3
3112 $movkey 64($key_),$rndkey0
3113 jmp .Locb_enc_loop4
3114
3115 .align 32
3116 .Locb_enc_loop4:
3117 aesenc $rndkey1,$inout0
3118 aesenc $rndkey1,$inout1
3119 aesenc $rndkey1,$inout2
3120 aesenc $rndkey1,$inout3
3121 $movkey ($key,%rax),$rndkey1
3122 add \$32,%rax
3123
3124 aesenc $rndkey0,$inout0
3125 aesenc $rndkey0,$inout1
3126 aesenc $rndkey0,$inout2
3127 aesenc $rndkey0,$inout3
3128 $movkey -16($key,%rax),$rndkey0
3129 jnz .Locb_enc_loop4
3130
3131 aesenc $rndkey1,$inout0
3132 aesenc $rndkey1,$inout1
3133 aesenc $rndkey1,$inout2
3134 aesenc $rndkey1,$inout3
3135 $movkey 16($key_),$rndkey1
3136 mov %r10,%rax # restore twisted rounds
3137
3138 aesenclast @offset[0],$inout0
3139 aesenclast @offset[1],$inout1
3140 aesenclast @offset[2],$inout2
3141 aesenclast @offset[3],$inout3
3142 ret
3143 .size __ocb_encrypt4,.-__ocb_encrypt4
3144
3145 .type __ocb_encrypt1,\@abi-omnipotent
3146 .align 32
3147 __ocb_encrypt1:
3148 pxor @offset[5],$inout5 # offset_i
3149 pxor $rndkey0l,$inout5 # offset_i ^ round[0]
3150 pxor $inout0,$checksum # accumulate checksum
3151 pxor $inout5,$inout0 # input ^ round[0] ^ offset_i
3152 $movkey 32($key_),$rndkey0
3153
3154 aesenc $rndkey1,$inout0
3155 $movkey 48($key_),$rndkey1
3156 pxor $rndkey0l,$inout5 # offset_i ^ round[last]
3157
3158 aesenc $rndkey0,$inout0
3159 $movkey 64($key_),$rndkey0
3160 jmp .Locb_enc_loop1
3161
3162 .align 32
3163 .Locb_enc_loop1:
3164 aesenc $rndkey1,$inout0
3165 $movkey ($key,%rax),$rndkey1
3166 add \$32,%rax
3167
3168 aesenc $rndkey0,$inout0
3169 $movkey -16($key,%rax),$rndkey0
3170 jnz .Locb_enc_loop1
3171
3172 aesenc $rndkey1,$inout0
3173 $movkey 16($key_),$rndkey1 # redundant in tail
3174 mov %r10,%rax # restore twisted rounds
3175
3176 aesenclast $inout5,$inout0
3177 ret
3178 .size __ocb_encrypt1,.-__ocb_encrypt1
3179
3180 .globl aesni_ocb_decrypt
3181 .type aesni_ocb_decrypt,\@function,6
3182 .align 32
3183 aesni_ocb_decrypt:
3184 lea (%rsp),%rax
3185 push %rbx
3186 push %rbp
3187 push %r12
3188 push %r13
3189 push %r14
3190 ___
3191 $code.=<<___ if ($win64);
3192 lea -0xa0(%rsp),%rsp
3193 movaps %xmm6,0x00(%rsp) # offload everything
3194 movaps %xmm7,0x10(%rsp)
3195 movaps %xmm8,0x20(%rsp)
3196 movaps %xmm9,0x30(%rsp)
3197 movaps %xmm10,0x40(%rsp)
3198 movaps %xmm11,0x50(%rsp)
3199 movaps %xmm12,0x60(%rsp)
3200 movaps %xmm13,0x70(%rsp)
3201 movaps %xmm14,0x80(%rsp)
3202 movaps %xmm15,0x90(%rsp)
3203 .Locb_dec_body:
3204 ___
3205 $code.=<<___;
3206 mov $seventh_arg(%rax),$L_p # 7th argument
3207 mov $seventh_arg+8(%rax),$checksum_p # 8th argument
3208
3209 mov 240($key),$rnds_
3210 mov $key,$key_
3211 shl \$4,$rnds_
3212 $movkey ($key),$rndkey0l # round[0]
3213 $movkey 16($key,$rnds_),$rndkey1 # round[last]
3214
3215 movdqu ($offset_p),@offset[5] # load last offset_i
3216 pxor $rndkey1,$rndkey0l # round[0] ^ round[last]
3217 pxor $rndkey1,@offset[5] # offset_i ^ round[last]
3218
3219 mov \$16+32,$rounds
3220 lea 32($key_,$rnds_),$key
3221 $movkey 16($key_),$rndkey1 # round[1]
3222 sub %r10,%rax # twisted $rounds
3223 mov %rax,%r10 # backup twisted $rounds
3224
3225 movdqu ($L_p),@offset[0] # L_0 for all odd-numbered blocks
3226 movdqu ($checksum_p),$checksum # load checksum
3227
3228 test \$1,$block_num # is first block number odd?
3229 jnz .Locb_dec_odd
3230
3231 bsf $block_num,$i1
3232 add \$1,$block_num
3233 shl \$4,$i1
3234 movdqu ($L_p,$i1),$inout5 # borrow
3235 movdqu ($inp),$inout0
3236 lea 16($inp),$inp
3237
3238 call __ocb_decrypt1
3239
3240 movdqa $inout5,@offset[5]
3241 movups $inout0,($out)
3242 xorps $inout0,$checksum # accumulate checksum
3243 lea 16($out),$out
3244 sub \$1,$blocks
3245 jz .Locb_dec_done
3246
3247 .Locb_dec_odd:
3248 lea 1($block_num),$i1 # even-numbered blocks
3249 lea 3($block_num),$i3
3250 lea 5($block_num),$i5
3251 lea 6($block_num),$block_num
3252 bsf $i1,$i1 # ntz(block)
3253 bsf $i3,$i3
3254 bsf $i5,$i5
3255 shl \$4,$i1 # ntz(block) -> table offset
3256 shl \$4,$i3
3257 shl \$4,$i5
3258
3259 sub \$6,$blocks
3260 jc .Locb_dec_short
3261 jmp .Locb_dec_grandloop
3262
3263 .align 32
3264 .Locb_dec_grandloop:
3265 movdqu `16*0`($inp),$inout0 # load input
3266 movdqu `16*1`($inp),$inout1
3267 movdqu `16*2`($inp),$inout2
3268 movdqu `16*3`($inp),$inout3
3269 movdqu `16*4`($inp),$inout4
3270 movdqu `16*5`($inp),$inout5
3271 lea `16*6`($inp),$inp
3272
3273 call __ocb_decrypt6
3274
3275 movups $inout0,`16*0`($out) # store output
3276 pxor $inout0,$checksum # accumulate checksum
3277 movups $inout1,`16*1`($out)
3278 pxor $inout1,$checksum
3279 movups $inout2,`16*2`($out)
3280 pxor $inout2,$checksum
3281 movups $inout3,`16*3`($out)
3282 pxor $inout3,$checksum
3283 movups $inout4,`16*4`($out)
3284 pxor $inout4,$checksum
3285 movups $inout5,`16*5`($out)
3286 pxor $inout5,$checksum
3287 lea `16*6`($out),$out
3288 sub \$6,$blocks
3289 jnc .Locb_dec_grandloop
3290
3291 .Locb_dec_short:
3292 add \$6,$blocks
3293 jz .Locb_dec_done
3294
3295 movdqu `16*0`($inp),$inout0
3296 cmp \$2,$blocks
3297 jb .Locb_dec_one
3298 movdqu `16*1`($inp),$inout1
3299 je .Locb_dec_two
3300
3301 movdqu `16*2`($inp),$inout2
3302 cmp \$4,$blocks
3303 jb .Locb_dec_three
3304 movdqu `16*3`($inp),$inout3
3305 je .Locb_dec_four
3306
3307 movdqu `16*4`($inp),$inout4
3308 pxor $inout5,$inout5
3309
3310 call __ocb_decrypt6
3311
3312 movdqa @offset[4],@offset[5]
3313 movups $inout0,`16*0`($out) # store output
3314 pxor $inout0,$checksum # accumulate checksum
3315 movups $inout1,`16*1`($out)
3316 pxor $inout1,$checksum
3317 movups $inout2,`16*2`($out)
3318 pxor $inout2,$checksum
3319 movups $inout3,`16*3`($out)
3320 pxor $inout3,$checksum
3321 movups $inout4,`16*4`($out)
3322 pxor $inout4,$checksum
3323
3324 jmp .Locb_dec_done
3325
3326 .align 16
3327 .Locb_dec_one:
3328 movdqa @offset[0],$inout5 # borrow
3329
3330 call __ocb_decrypt1
3331
3332 movdqa $inout5,@offset[5]
3333 movups $inout0,`16*0`($out) # store output
3334 xorps $inout0,$checksum # accumulate checksum
3335 jmp .Locb_dec_done
3336
3337 .align 16
3338 .Locb_dec_two:
3339 pxor $inout2,$inout2
3340 pxor $inout3,$inout3
3341
3342 call __ocb_decrypt4
3343
3344 movdqa @offset[1],@offset[5]
3345 movups $inout0,`16*0`($out) # store output
3346 xorps $inout0,$checksum # accumulate checksum
3347 movups $inout1,`16*1`($out)
3348 xorps $inout1,$checksum
3349
3350 jmp .Locb_dec_done
3351
3352 .align 16
3353 .Locb_dec_three:
3354 pxor $inout3,$inout3
3355
3356 call __ocb_decrypt4
3357
3358 movdqa @offset[2],@offset[5]
3359 movups $inout0,`16*0`($out) # store output
3360 xorps $inout0,$checksum # accumulate checksum
3361 movups $inout1,`16*1`($out)
3362 xorps $inout1,$checksum
3363 movups $inout2,`16*2`($out)
3364 xorps $inout2,$checksum
3365
3366 jmp .Locb_dec_done
3367
3368 .align 16
3369 .Locb_dec_four:
3370 call __ocb_decrypt4
3371
3372 movdqa @offset[3],@offset[5]
3373 movups $inout0,`16*0`($out) # store output
3374 pxor $inout0,$checksum # accumulate checksum
3375 movups $inout1,`16*1`($out)
3376 pxor $inout1,$checksum
3377 movups $inout2,`16*2`($out)
3378 pxor $inout2,$checksum
3379 movups $inout3,`16*3`($out)
3380 pxor $inout3,$checksum
3381
3382 .Locb_dec_done:
3383 pxor $rndkey0,@offset[5] # "remove" round[last]
3384 movdqu $checksum,($checksum_p) # store checksum
3385 movdqu @offset[5],($offset_p) # store last offset_i
3386
3387 xorps %xmm0,%xmm0 # clear register bank
3388 pxor %xmm1,%xmm1
3389 pxor %xmm2,%xmm2
3390 pxor %xmm3,%xmm3
3391 pxor %xmm4,%xmm4
3392 pxor %xmm5,%xmm5
3393 ___
3394 $code.=<<___ if (!$win64);
3395 pxor %xmm6,%xmm6
3396 pxor %xmm7,%xmm7
3397 pxor %xmm8,%xmm8
3398 pxor %xmm9,%xmm9
3399 pxor %xmm10,%xmm10
3400 pxor %xmm11,%xmm11
3401 pxor %xmm12,%xmm12
3402 pxor %xmm13,%xmm13
3403 pxor %xmm14,%xmm14
3404 pxor %xmm15,%xmm15
3405 ___
3406 $code.=<<___ if ($win64);
3407 movaps 0x00(%rsp),%xmm6
3408 movaps %xmm0,0x00(%rsp) # clear stack
3409 movaps 0x10(%rsp),%xmm7
3410 movaps %xmm0,0x10(%rsp)
3411 movaps 0x20(%rsp),%xmm8
3412 movaps %xmm0,0x20(%rsp)
3413 movaps 0x30(%rsp),%xmm9
3414 movaps %xmm0,0x30(%rsp)
3415 movaps 0x40(%rsp),%xmm10
3416 movaps %xmm0,0x40(%rsp)
3417 movaps 0x50(%rsp),%xmm11
3418 movaps %xmm0,0x50(%rsp)
3419 movaps 0x60(%rsp),%xmm12
3420 movaps %xmm0,0x60(%rsp)
3421 movaps 0x70(%rsp),%xmm13
3422 movaps %xmm0,0x70(%rsp)
3423 movaps 0x80(%rsp),%xmm14
3424 movaps %xmm0,0x80(%rsp)
3425 movaps 0x90(%rsp),%xmm15
3426 movaps %xmm0,0x90(%rsp)
3427 lea 0xa0+0x28(%rsp),%rax
3428 .Locb_dec_pop:
3429 lea 0xa0(%rsp),%rsp
3430 ___
3431 $code.=<<___;
3432 pop %r14
3433 pop %r13
3434 pop %r12
3435 pop %rbp
3436 pop %rbx
3437 .Locb_dec_epilogue:
3438 ret
3439 .size aesni_ocb_decrypt,.-aesni_ocb_decrypt
3440
3441 .type __ocb_decrypt6,\@abi-omnipotent
3442 .align 32
3443 __ocb_decrypt6:
3444 pxor $rndkey0l,@offset[5] # offset_i ^ round[0]
3445 movdqu ($L_p,$i1),@offset[1]
3446 movdqa @offset[0],@offset[2]
3447 movdqu ($L_p,$i3),@offset[3]
3448 movdqa @offset[0],@offset[4]
3449 pxor @offset[5],@offset[0]
3450 movdqu ($L_p,$i5),@offset[5]
3451 pxor @offset[0],@offset[1]
3452 pxor @offset[0],$inout0 # input ^ round[0] ^ offset_i
3453 pxor @offset[1],@offset[2]
3454 pxor @offset[1],$inout1
3455 pxor @offset[2],@offset[3]
3456 pxor @offset[2],$inout2
3457 pxor @offset[3],@offset[4]
3458 pxor @offset[3],$inout3
3459 pxor @offset[4],@offset[5]
3460 pxor @offset[4],$inout4
3461 pxor @offset[5],$inout5
3462 $movkey 32($key_),$rndkey0
3463
3464 lea 1($block_num),$i1 # even-numbered blocks
3465 lea 3($block_num),$i3
3466 lea 5($block_num),$i5
3467 add \$6,$block_num
3468 pxor $rndkey0l,@offset[0] # offset_i ^ round[last]
3469 bsf $i1,$i1 # ntz(block)
3470 bsf $i3,$i3
3471 bsf $i5,$i5
3472
3473 aesdec $rndkey1,$inout0
3474 aesdec $rndkey1,$inout1
3475 aesdec $rndkey1,$inout2
3476 aesdec $rndkey1,$inout3
3477 pxor $rndkey0l,@offset[1]
3478 pxor $rndkey0l,@offset[2]
3479 aesdec $rndkey1,$inout4
3480 pxor $rndkey0l,@offset[3]
3481 pxor $rndkey0l,@offset[4]
3482 aesdec $rndkey1,$inout5
3483 $movkey 48($key_),$rndkey1
3484 pxor $rndkey0l,@offset[5]
3485
3486 aesdec $rndkey0,$inout0
3487 aesdec $rndkey0,$inout1
3488 aesdec $rndkey0,$inout2
3489 aesdec $rndkey0,$inout3
3490 aesdec $rndkey0,$inout4
3491 aesdec $rndkey0,$inout5
3492 $movkey 64($key_),$rndkey0
3493 shl \$4,$i1 # ntz(block) -> table offset
3494 shl \$4,$i3
3495 jmp .Locb_dec_loop6
3496
3497 .align 32
3498 .Locb_dec_loop6:
3499 aesdec $rndkey1,$inout0
3500 aesdec $rndkey1,$inout1
3501 aesdec $rndkey1,$inout2
3502 aesdec $rndkey1,$inout3
3503 aesdec $rndkey1,$inout4
3504 aesdec $rndkey1,$inout5
3505 $movkey ($key,%rax),$rndkey1
3506 add \$32,%rax
3507
3508 aesdec $rndkey0,$inout0
3509 aesdec $rndkey0,$inout1
3510 aesdec $rndkey0,$inout2
3511 aesdec $rndkey0,$inout3
3512 aesdec $rndkey0,$inout4
3513 aesdec $rndkey0,$inout5
3514 $movkey -16($key,%rax),$rndkey0
3515 jnz .Locb_dec_loop6
3516
3517 aesdec $rndkey1,$inout0
3518 aesdec $rndkey1,$inout1
3519 aesdec $rndkey1,$inout2
3520 aesdec $rndkey1,$inout3
3521 aesdec $rndkey1,$inout4
3522 aesdec $rndkey1,$inout5
3523 $movkey 16($key_),$rndkey1
3524 shl \$4,$i5
3525
3526 aesdeclast @offset[0],$inout0
3527 movdqu ($L_p),@offset[0] # L_0 for all odd-numbered blocks
3528 mov %r10,%rax # restore twisted rounds
3529 aesdeclast @offset[1],$inout1
3530 aesdeclast @offset[2],$inout2
3531 aesdeclast @offset[3],$inout3
3532 aesdeclast @offset[4],$inout4
3533 aesdeclast @offset[5],$inout5
3534 ret
3535 .size __ocb_decrypt6,.-__ocb_decrypt6
3536
3537 .type __ocb_decrypt4,\@abi-omnipotent
3538 .align 32
3539 __ocb_decrypt4:
3540 pxor $rndkey0l,@offset[5] # offset_i ^ round[0]
3541 movdqu ($L_p,$i1),@offset[1]
3542 movdqa @offset[0],@offset[2]
3543 movdqu ($L_p,$i3),@offset[3]
3544 pxor @offset[5],@offset[0]
3545 pxor @offset[0],@offset[1]
3546 pxor @offset[0],$inout0 # input ^ round[0] ^ offset_i
3547 pxor @offset[1],@offset[2]
3548 pxor @offset[1],$inout1
3549 pxor @offset[2],@offset[3]
3550 pxor @offset[2],$inout2
3551 pxor @offset[3],$inout3
3552 $movkey 32($key_),$rndkey0
3553
3554 pxor $rndkey0l,@offset[0] # offset_i ^ round[last]
3555 pxor $rndkey0l,@offset[1]
3556 pxor $rndkey0l,@offset[2]
3557 pxor $rndkey0l,@offset[3]
3558
3559 aesdec $rndkey1,$inout0
3560 aesdec $rndkey1,$inout1
3561 aesdec $rndkey1,$inout2
3562 aesdec $rndkey1,$inout3
3563 $movkey 48($key_),$rndkey1
3564
3565 aesdec $rndkey0,$inout0
3566 aesdec $rndkey0,$inout1
3567 aesdec $rndkey0,$inout2
3568 aesdec $rndkey0,$inout3
3569 $movkey 64($key_),$rndkey0
3570 jmp .Locb_dec_loop4
3571
3572 .align 32
3573 .Locb_dec_loop4:
3574 aesdec $rndkey1,$inout0
3575 aesdec $rndkey1,$inout1
3576 aesdec $rndkey1,$inout2
3577 aesdec $rndkey1,$inout3
3578 $movkey ($key,%rax),$rndkey1
3579 add \$32,%rax
3580
3581 aesdec $rndkey0,$inout0
3582 aesdec $rndkey0,$inout1
3583 aesdec $rndkey0,$inout2
3584 aesdec $rndkey0,$inout3
3585 $movkey -16($key,%rax),$rndkey0
3586 jnz .Locb_dec_loop4
3587
3588 aesdec $rndkey1,$inout0
3589 aesdec $rndkey1,$inout1
3590 aesdec $rndkey1,$inout2
3591 aesdec $rndkey1,$inout3
3592 $movkey 16($key_),$rndkey1
3593 mov %r10,%rax # restore twisted rounds
3594
3595 aesdeclast @offset[0],$inout0
3596 aesdeclast @offset[1],$inout1
3597 aesdeclast @offset[2],$inout2
3598 aesdeclast @offset[3],$inout3
3599 ret
3600 .size __ocb_decrypt4,.-__ocb_decrypt4
3601
3602 .type __ocb_decrypt1,\@abi-omnipotent
3603 .align 32
3604 __ocb_decrypt1:
3605 pxor @offset[5],$inout5 # offset_i
3606 pxor $rndkey0l,$inout5 # offset_i ^ round[0]
3607 pxor $inout5,$inout0 # input ^ round[0] ^ offset_i
3608 $movkey 32($key_),$rndkey0
3609
3610 aesdec $rndkey1,$inout0
3611 $movkey 48($key_),$rndkey1
3612 pxor $rndkey0l,$inout5 # offset_i ^ round[last]
3613
3614 aesdec $rndkey0,$inout0
3615 $movkey 64($key_),$rndkey0
3616 jmp .Locb_dec_loop1
3617
3618 .align 32
3619 .Locb_dec_loop1:
3620 aesdec $rndkey1,$inout0
3621 $movkey ($key,%rax),$rndkey1
3622 add \$32,%rax
3623
3624 aesdec $rndkey0,$inout0
3625 $movkey -16($key,%rax),$rndkey0
3626 jnz .Locb_dec_loop1
3627
3628 aesdec $rndkey1,$inout0
3629 $movkey 16($key_),$rndkey1 # redundant in tail
3630 mov %r10,%rax # restore twisted rounds
3631
3632 aesdeclast $inout5,$inout0
3633 ret
3634 .size __ocb_decrypt1,.-__ocb_decrypt1
3635 ___
3636 } }}
3637 \f
3638 ########################################################################
3639 # void $PREFIX_cbc_encrypt (const void *inp, void *out,
3640 # size_t length, const AES_KEY *key,
3641 # unsigned char *ivp,const int enc);
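#
# CBC encryption is inherently serial, C[i] = E_k(P[i] ^ C[i-1]), so the
# encrypt path below processes one block per iteration. Decryption is
# parallelizable, P[i] = D_k(C[i]) ^ C[i-1], which is why the bulk decrypt
# path interleaves 6 or 8 blocks per iteration.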
3642 {
3643 my $frame_size = 0x10 + ($win64?0xa0:0); # used in decrypt
3644 my ($iv,$in0,$in1,$in2,$in3,$in4)=map("%xmm$_",(10..15));
3645 my $inp_=$key_;
3646
3647 $code.=<<___;
3648 .globl ${PREFIX}_cbc_encrypt
3649 .type ${PREFIX}_cbc_encrypt,\@function,6
3650 .align 16
3651 ${PREFIX}_cbc_encrypt:
3652 test $len,$len # check length
3653 jz .Lcbc_ret
3654
3655 mov 240($key),$rnds_ # key->rounds
3656 mov $key,$key_ # backup $key
3657 test %r9d,%r9d # 6th argument
3658 jz .Lcbc_decrypt
3659 #--------------------------- CBC ENCRYPT ------------------------------#
3660 movups ($ivp),$inout0 # load iv as initial state
3661 mov $rnds_,$rounds
3662 cmp \$16,$len
3663 jb .Lcbc_enc_tail
3664 sub \$16,$len
3665 jmp .Lcbc_enc_loop
3666 .align 16
3667 .Lcbc_enc_loop:
3668 movups ($inp),$inout1 # load input
3669 lea 16($inp),$inp
3670 #xorps $inout1,$inout0
3671 ___
3672 &aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
3673 $code.=<<___;
3674 mov $rnds_,$rounds # restore $rounds
3675 mov $key_,$key # restore $key
3676 movups $inout0,0($out) # store output
3677 lea 16($out),$out
3678 sub \$16,$len
3679 jnc .Lcbc_enc_loop
3680 add \$16,$len
3681 jnz .Lcbc_enc_tail
3682 pxor $rndkey0,$rndkey0 # clear register bank
3683 pxor $rndkey1,$rndkey1
3684 movups $inout0,($ivp)
3685 pxor $inout0,$inout0
3686 pxor $inout1,$inout1
3687 jmp .Lcbc_ret
3688
3689 .Lcbc_enc_tail:
3690 mov $len,%rcx # zaps $key
3691 xchg $inp,$out # $inp is %rsi and $out is %rdi now
3692 .long 0x9066A4F3 # rep movsb
3693 mov \$16,%ecx # zero tail
3694 sub $len,%rcx
3695 xor %eax,%eax
3696 .long 0x9066AAF3 # rep stosb
3697 lea -16(%rdi),%rdi # rewind $out by 1 block
3698 mov $rnds_,$rounds # restore $rounds
3699 mov %rdi,%rsi # $inp and $out are the same
3700 mov $key_,$key # restore $key
3701 xor $len,$len # len=16
3702 jmp .Lcbc_enc_loop # one more spin
3703 \f#--------------------------- CBC DECRYPT ------------------------------#
3704 .align 16
3705 .Lcbc_decrypt:
3706 cmp \$16,$len
3707 jne .Lcbc_decrypt_bulk
3708
3709 # handle single block without allocating stack frame,
3710 # useful in ciphertext stealing mode
3711 movdqu ($inp),$inout0 # load input
3712 movdqu ($ivp),$inout1 # load iv
3713 movdqa $inout0,$inout2 # future iv
3714 ___
3715 &aesni_generate1("dec",$key,$rnds_);
3716 $code.=<<___;
3717 pxor $rndkey0,$rndkey0 # clear register bank
3718 pxor $rndkey1,$rndkey1
3719 movdqu $inout2,($ivp) # store iv
3720 xorps $inout1,$inout0 # ^=iv
3721 pxor $inout1,$inout1
3722 movups $inout0,($out) # store output
3723 pxor $inout0,$inout0
3724 jmp .Lcbc_ret
3725 .align 16
3726 .Lcbc_decrypt_bulk:
3727 lea (%rsp),%rax
3728 push %rbp
3729 sub \$$frame_size,%rsp
3730 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
3731 ___
3732 $code.=<<___ if ($win64);
3733 movaps %xmm6,0x10(%rsp)
3734 movaps %xmm7,0x20(%rsp)
3735 movaps %xmm8,0x30(%rsp)
3736 movaps %xmm9,0x40(%rsp)
3737 movaps %xmm10,0x50(%rsp)
3738 movaps %xmm11,0x60(%rsp)
3739 movaps %xmm12,0x70(%rsp)
3740 movaps %xmm13,0x80(%rsp)
3741 movaps %xmm14,0x90(%rsp)
3742 movaps %xmm15,0xa0(%rsp)
3743 .Lcbc_decrypt_body:
3744 ___
3745 $code.=<<___;
3746 lea -8(%rax),%rbp
3747 movups ($ivp),$iv
3748 mov $rnds_,$rounds
3749 cmp \$0x50,$len
3750 jbe .Lcbc_dec_tail
3751
3752 $movkey ($key),$rndkey0
3753 movdqu 0x00($inp),$inout0 # load input
3754 movdqu 0x10($inp),$inout1
3755 movdqa $inout0,$in0
3756 movdqu 0x20($inp),$inout2
3757 movdqa $inout1,$in1
3758 movdqu 0x30($inp),$inout3
3759 movdqa $inout2,$in2
3760 movdqu 0x40($inp),$inout4
3761 movdqa $inout3,$in3
3762 movdqu 0x50($inp),$inout5
3763 movdqa $inout4,$in4
3764 mov OPENSSL_ia32cap_P+4(%rip),%r9d
3765 cmp \$0x70,$len
3766 jbe .Lcbc_dec_six_or_seven
3767
3768 and \$`1<<26|1<<22`,%r9d # isolate XSAVE+MOVBE
3769 sub \$0x50,$len # $len is biased by -5*16
3770 cmp \$`1<<22`,%r9d # check for MOVBE without XSAVE
3771 je .Lcbc_dec_loop6_enter # [which denotes Atom Silvermont]
3772 sub \$0x20,$len # $len is biased by -7*16
3773 lea 0x70($key),$key # size optimization
3774 jmp .Lcbc_dec_loop8_enter
3775 .align 16
3776 .Lcbc_dec_loop8:
3777 movups $inout7,($out)
3778 lea 0x10($out),$out
3779 .Lcbc_dec_loop8_enter:
3780 movdqu 0x60($inp),$inout6
3781 pxor $rndkey0,$inout0
3782 movdqu 0x70($inp),$inout7
3783 pxor $rndkey0,$inout1
3784 $movkey 0x10-0x70($key),$rndkey1
3785 pxor $rndkey0,$inout2
3786 xor $inp_,$inp_
3787 cmp \$0x70,$len # are there at least 0x60 bytes ahead?
3788 pxor $rndkey0,$inout3
3789 pxor $rndkey0,$inout4
3790 pxor $rndkey0,$inout5
3791 pxor $rndkey0,$inout6
3792
3793 aesdec $rndkey1,$inout0
3794 pxor $rndkey0,$inout7
3795 $movkey 0x20-0x70($key),$rndkey0
3796 aesdec $rndkey1,$inout1
3797 aesdec $rndkey1,$inout2
3798 aesdec $rndkey1,$inout3
3799 aesdec $rndkey1,$inout4
3800 aesdec $rndkey1,$inout5
3801 aesdec $rndkey1,$inout6
3802 setnc ${inp_}b
3803 shl \$7,$inp_
3804 aesdec $rndkey1,$inout7
3805 add $inp,$inp_
3806 $movkey 0x30-0x70($key),$rndkey1
3807 ___
3808 for($i=1;$i<12;$i++) {
3809 my $rndkeyx = ($i&1)?$rndkey0:$rndkey1;
3810 $code.=<<___ if ($i==7);
3811 cmp \$11,$rounds
3812 ___
3813 $code.=<<___;
3814 aesdec $rndkeyx,$inout0
3815 aesdec $rndkeyx,$inout1
3816 aesdec $rndkeyx,$inout2
3817 aesdec $rndkeyx,$inout3
3818 aesdec $rndkeyx,$inout4
3819 aesdec $rndkeyx,$inout5
3820 aesdec $rndkeyx,$inout6
3821 aesdec $rndkeyx,$inout7
3822 $movkey `0x30+0x10*$i`-0x70($key),$rndkeyx
3823 ___
3824 $code.=<<___ if ($i<6 || (!($i&1) && $i>7));
3825 nop
3826 ___
3827 $code.=<<___ if ($i==7);
3828 jb .Lcbc_dec_done
3829 ___
3830 $code.=<<___ if ($i==9);
3831 je .Lcbc_dec_done
3832 ___
3833 $code.=<<___ if ($i==11);
3834 jmp .Lcbc_dec_done
3835 ___
3836 }
3837 $code.=<<___;
3838 .align 16
3839 .Lcbc_dec_done:
3840 aesdec $rndkey1,$inout0
3841 aesdec $rndkey1,$inout1
3842 pxor $rndkey0,$iv
3843 pxor $rndkey0,$in0
3844 aesdec $rndkey1,$inout2
3845 aesdec $rndkey1,$inout3
3846 pxor $rndkey0,$in1
3847 pxor $rndkey0,$in2
3848 aesdec $rndkey1,$inout4
3849 aesdec $rndkey1,$inout5
3850 pxor $rndkey0,$in3
3851 pxor $rndkey0,$in4
3852 aesdec $rndkey1,$inout6
3853 aesdec $rndkey1,$inout7
3854 movdqu 0x50($inp),$rndkey1
3855
3856 aesdeclast $iv,$inout0
3857 movdqu 0x60($inp),$iv # borrow $iv
3858 pxor $rndkey0,$rndkey1
3859 aesdeclast $in0,$inout1
3860 pxor $rndkey0,$iv
3861 movdqu 0x70($inp),$rndkey0 # next IV
3862 aesdeclast $in1,$inout2
3863 lea 0x80($inp),$inp
3864 movdqu 0x00($inp_),$in0
3865 aesdeclast $in2,$inout3
3866 aesdeclast $in3,$inout4
3867 movdqu 0x10($inp_),$in1
3868 movdqu 0x20($inp_),$in2
3869 aesdeclast $in4,$inout5
3870 aesdeclast $rndkey1,$inout6
3871 movdqu 0x30($inp_),$in3
3872 movdqu 0x40($inp_),$in4
3873 aesdeclast $iv,$inout7
3874 movdqa $rndkey0,$iv # return $iv
3875 movdqu 0x50($inp_),$rndkey1
3876 $movkey -0x70($key),$rndkey0
3877
3878 movups $inout0,($out) # store output
3879 movdqa $in0,$inout0
3880 movups $inout1,0x10($out)
3881 movdqa $in1,$inout1
3882 movups $inout2,0x20($out)
3883 movdqa $in2,$inout2
3884 movups $inout3,0x30($out)
3885 movdqa $in3,$inout3
3886 movups $inout4,0x40($out)
3887 movdqa $in4,$inout4
3888 movups $inout5,0x50($out)
3889 movdqa $rndkey1,$inout5
3890 movups $inout6,0x60($out)
3891 lea 0x70($out),$out
3892
3893 sub \$0x80,$len
3894 ja .Lcbc_dec_loop8
3895
3896 movaps $inout7,$inout0
3897 lea -0x70($key),$key
3898 add \$0x70,$len
3899 jle .Lcbc_dec_clear_tail_collected
3900 movups $inout7,($out)
3901 lea 0x10($out),$out
3902 cmp \$0x50,$len
3903 jbe .Lcbc_dec_tail
3904
3905 movaps $in0,$inout0
3906 .Lcbc_dec_six_or_seven:
3907 cmp \$0x60,$len
3908 ja .Lcbc_dec_seven
3909
3910 movaps $inout5,$inout6
3911 call _aesni_decrypt6
3912 pxor $iv,$inout0 # ^= IV
3913 movaps $inout6,$iv
3914 pxor $in0,$inout1
3915 movdqu $inout0,($out)
3916 pxor $in1,$inout2
3917 movdqu $inout1,0x10($out)
3918 pxor $inout1,$inout1 # clear register bank
3919 pxor $in2,$inout3
3920 movdqu $inout2,0x20($out)
3921 pxor $inout2,$inout2
3922 pxor $in3,$inout4
3923 movdqu $inout3,0x30($out)
3924 pxor $inout3,$inout3
3925 pxor $in4,$inout5
3926 movdqu $inout4,0x40($out)
3927 pxor $inout4,$inout4
3928 lea 0x50($out),$out
3929 movdqa $inout5,$inout0
3930 pxor $inout5,$inout5
3931 jmp .Lcbc_dec_tail_collected
3932
3933 .align 16
3934 .Lcbc_dec_seven:
3935 movups 0x60($inp),$inout6
3936 xorps $inout7,$inout7
3937 call _aesni_decrypt8
3938 movups 0x50($inp),$inout7
3939 pxor $iv,$inout0 # ^= IV
3940 movups 0x60($inp),$iv
3941 pxor $in0,$inout1
3942 movdqu $inout0,($out)
3943 pxor $in1,$inout2
3944 movdqu $inout1,0x10($out)
3945 pxor $inout1,$inout1 # clear register bank
3946 pxor $in2,$inout3
3947 movdqu $inout2,0x20($out)
3948 pxor $inout2,$inout2
3949 pxor $in3,$inout4
3950 movdqu $inout3,0x30($out)
3951 pxor $inout3,$inout3
3952 pxor $in4,$inout5
3953 movdqu $inout4,0x40($out)
3954 pxor $inout4,$inout4
3955 pxor $inout7,$inout6
3956 movdqu $inout5,0x50($out)
3957 pxor $inout5,$inout5
3958 lea 0x60($out),$out
3959 movdqa $inout6,$inout0
3960 pxor $inout6,$inout6
3961 pxor $inout7,$inout7
3962 jmp .Lcbc_dec_tail_collected
3963
3964 .align 16
3965 .Lcbc_dec_loop6:
3966 movups $inout5,($out)
3967 lea 0x10($out),$out
3968 movdqu 0x00($inp),$inout0 # load input
3969 movdqu 0x10($inp),$inout1
3970 movdqa $inout0,$in0
3971 movdqu 0x20($inp),$inout2
3972 movdqa $inout1,$in1
3973 movdqu 0x30($inp),$inout3
3974 movdqa $inout2,$in2
3975 movdqu 0x40($inp),$inout4
3976 movdqa $inout3,$in3
3977 movdqu 0x50($inp),$inout5
3978 movdqa $inout4,$in4
3979 .Lcbc_dec_loop6_enter:
3980 lea 0x60($inp),$inp
3981 movdqa $inout5,$inout6
3982
3983 call _aesni_decrypt6
3984
3985 pxor $iv,$inout0 # ^= IV
3986 movdqa $inout6,$iv
3987 pxor $in0,$inout1
3988 movdqu $inout0,($out)
3989 pxor $in1,$inout2
3990 movdqu $inout1,0x10($out)
3991 pxor $in2,$inout3
3992 movdqu $inout2,0x20($out)
3993 pxor $in3,$inout4
3994 mov $key_,$key
3995 movdqu $inout3,0x30($out)
3996 pxor $in4,$inout5
3997 mov $rnds_,$rounds
3998 movdqu $inout4,0x40($out)
3999 lea 0x50($out),$out
4000 sub \$0x60,$len
4001 ja .Lcbc_dec_loop6
4002
4003 movdqa $inout5,$inout0
4004 add \$0x50,$len
4005 jle .Lcbc_dec_clear_tail_collected
4006 movups $inout5,($out)
4007 lea 0x10($out),$out
4008
4009 .Lcbc_dec_tail:
4010 movups ($inp),$inout0
4011 sub \$0x10,$len
4012 jbe .Lcbc_dec_one # $len is 1*16 or less
4013
4014 movups 0x10($inp),$inout1
4015 movaps $inout0,$in0
4016 sub \$0x10,$len
4017 jbe .Lcbc_dec_two # $len is 2*16 or less
4018
4019 movups 0x20($inp),$inout2
4020 movaps $inout1,$in1
4021 sub \$0x10,$len
4022 jbe .Lcbc_dec_three # $len is 3*16 or less
4023
4024 movups 0x30($inp),$inout3
4025 movaps $inout2,$in2
4026 sub \$0x10,$len
4027 jbe .Lcbc_dec_four # $len is 4*16 or less
4028
4029 movups 0x40($inp),$inout4 # $len is 5*16 or less
4030 movaps $inout3,$in3
4031 movaps $inout4,$in4
4032 xorps $inout5,$inout5
4033 call _aesni_decrypt6
4034 pxor $iv,$inout0
4035 movaps $in4,$iv
4036 pxor $in0,$inout1
4037 movdqu $inout0,($out)
4038 pxor $in1,$inout2
4039 movdqu $inout1,0x10($out)
4040 pxor $inout1,$inout1 # clear register bank
4041 pxor $in2,$inout3
4042 movdqu $inout2,0x20($out)
4043 pxor $inout2,$inout2
4044 pxor $in3,$inout4
4045 movdqu $inout3,0x30($out)
4046 pxor $inout3,$inout3
4047 lea 0x40($out),$out
4048 movdqa $inout4,$inout0
4049 pxor $inout4,$inout4
4050 pxor $inout5,$inout5
4051 sub \$0x10,$len
4052 jmp .Lcbc_dec_tail_collected
4053
4054 .align 16
4055 .Lcbc_dec_one:
4056 movaps $inout0,$in0
4057 ___
4058 &aesni_generate1("dec",$key,$rounds);
4059 $code.=<<___;
4060 xorps $iv,$inout0
4061 movaps $in0,$iv
4062 jmp .Lcbc_dec_tail_collected
4063 .align 16
4064 .Lcbc_dec_two:
4065 movaps $inout1,$in1
4066 call _aesni_decrypt2
4067 pxor $iv,$inout0
4068 movaps $in1,$iv
4069 pxor $in0,$inout1
4070 movdqu $inout0,($out)
4071 movdqa $inout1,$inout0
4072 pxor $inout1,$inout1 # clear register bank
4073 lea 0x10($out),$out
4074 jmp .Lcbc_dec_tail_collected
4075 .align 16
4076 .Lcbc_dec_three:
4077 movaps $inout2,$in2
4078 call _aesni_decrypt3
4079 pxor $iv,$inout0
4080 movaps $in2,$iv
4081 pxor $in0,$inout1
4082 movdqu $inout0,($out)
4083 pxor $in1,$inout2
4084 movdqu $inout1,0x10($out)
4085 pxor $inout1,$inout1 # clear register bank
4086 movdqa $inout2,$inout0
4087 pxor $inout2,$inout2
4088 lea 0x20($out),$out
4089 jmp .Lcbc_dec_tail_collected
4090 .align 16
4091 .Lcbc_dec_four:
4092 movaps $inout3,$in3
4093 call _aesni_decrypt4
4094 pxor $iv,$inout0
4095 movaps $in3,$iv
4096 pxor $in0,$inout1
4097 movdqu $inout0,($out)
4098 pxor $in1,$inout2
4099 movdqu $inout1,0x10($out)
4100 pxor $inout1,$inout1 # clear register bank
4101 pxor $in2,$inout3
4102 movdqu $inout2,0x20($out)
4103 pxor $inout2,$inout2
4104 movdqa $inout3,$inout0
4105 pxor $inout3,$inout3
4106 lea 0x30($out),$out
4107 jmp .Lcbc_dec_tail_collected
4108
4109 .align 16
4110 .Lcbc_dec_clear_tail_collected:
4111 pxor $inout1,$inout1 # clear register bank
4112 pxor $inout2,$inout2
4113 pxor $inout3,$inout3
4114 ___
4115 $code.=<<___ if (!$win64);
4116 pxor $inout4,$inout4 # %xmm6..9
4117 pxor $inout5,$inout5
4118 pxor $inout6,$inout6
4119 pxor $inout7,$inout7
4120 ___
4121 $code.=<<___;
4122 .Lcbc_dec_tail_collected:
4123 movups $iv,($ivp)
4124 and \$15,$len
4125 jnz .Lcbc_dec_tail_partial
4126 movups $inout0,($out)
4127 pxor $inout0,$inout0
4128 jmp .Lcbc_dec_ret
4129 .align 16
4130 .Lcbc_dec_tail_partial:
4131 movaps $inout0,(%rsp)
4132 pxor $inout0,$inout0
4133 mov \$16,%rcx
4134 mov $out,%rdi
4135 sub $len,%rcx
4136 lea (%rsp),%rsi
4137 .long 0x9066A4F3 # rep movsb
4138 movdqa $inout0,(%rsp)
4139
4140 .Lcbc_dec_ret:
4141 xorps $rndkey0,$rndkey0 # %xmm0
4142 pxor $rndkey1,$rndkey1
4143 ___
4144 $code.=<<___ if ($win64);
4145 movaps 0x10(%rsp),%xmm6
4146 movaps %xmm0,0x10(%rsp) # clear stack
4147 movaps 0x20(%rsp),%xmm7
4148 movaps %xmm0,0x20(%rsp)
4149 movaps 0x30(%rsp),%xmm8
4150 movaps %xmm0,0x30(%rsp)
4151 movaps 0x40(%rsp),%xmm9
4152 movaps %xmm0,0x40(%rsp)
4153 movaps 0x50(%rsp),%xmm10
4154 movaps %xmm0,0x50(%rsp)
4155 movaps 0x60(%rsp),%xmm11
4156 movaps %xmm0,0x60(%rsp)
4157 movaps 0x70(%rsp),%xmm12
4158 movaps %xmm0,0x70(%rsp)
4159 movaps 0x80(%rsp),%xmm13
4160 movaps %xmm0,0x80(%rsp)
4161 movaps 0x90(%rsp),%xmm14
4162 movaps %xmm0,0x90(%rsp)
4163 movaps 0xa0(%rsp),%xmm15
4164 movaps %xmm0,0xa0(%rsp)
4165 ___
4166 $code.=<<___;
4167 lea (%rbp),%rsp
4168 pop %rbp
4169 .Lcbc_ret:
4170 ret
4171 .size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
4172 ___
4173 } \f
4174 # int ${PREFIX}_set_decrypt_key(const unsigned char *inp,
4175 # int bits, AES_KEY *key)
4176 #
4177 # input: $inp user-supplied key
4178 # $bits $inp length in bits
4179 # $key pointer to key schedule
4180 # output:	%eax	0 on success, -1 or -2 on failure (see C)
4181 # *$key key schedule
4182 #
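#
# For reference, a rough C sketch (not part of this file; the function name
# is hypothetical) of the transform this routine performs: the encryption
# schedule produced by __aesni_set_encrypt_key is reversed and AESIMC
# (InvMixColumns) is applied to the inner round keys, yielding the schedule
# for the Equivalent Inverse Cipher. "rounds" is the AES round count,
# 10/12/14.
#
#	#include <wmmintrin.h>
#
#	static void make_decrypt_schedule(__m128i *rk, int rounds)
#	{
#		for (int i = 0, j = rounds; i < j; i++, j--) {
#			__m128i t = rk[i];	/* reverse round-key order */
#			rk[i] = rk[j];
#			rk[j] = t;
#		}
#		for (int i = 1; i < rounds; i++)	/* all but first and last */
#			rk[i] = _mm_aesimc_si128(rk[i]);
#	}
#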
4183 { my ($inp,$bits,$key) = @_4args;
4184 $bits =~ s/%r/%e/;
4185
4186 $code.=<<___;
4187 .globl ${PREFIX}_set_decrypt_key
4188 .type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
4189 .align 16
4190 ${PREFIX}_set_decrypt_key:
4191 .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
4192 call __aesni_set_encrypt_key
4193 	shl	\$4,$bits		# rounds-1 after __aesni_set_encrypt_key
4194 test %eax,%eax
4195 jnz .Ldec_key_ret
4196 lea 16($key,$bits),$inp # points at the end of key schedule
4197
4198 $movkey ($key),%xmm0 # just swap
4199 $movkey ($inp),%xmm1
4200 $movkey %xmm0,($inp)
4201 $movkey %xmm1,($key)
4202 lea 16($key),$key
4203 lea -16($inp),$inp
4204
4205 .Ldec_key_inverse:
4206 $movkey ($key),%xmm0 # swap and inverse
4207 $movkey ($inp),%xmm1
4208 aesimc %xmm0,%xmm0
4209 aesimc %xmm1,%xmm1
4210 lea 16($key),$key
4211 lea -16($inp),$inp
4212 $movkey %xmm0,16($inp)
4213 $movkey %xmm1,-16($key)
4214 cmp $key,$inp
4215 ja .Ldec_key_inverse
4216
4217 $movkey ($key),%xmm0 # inverse middle
4218 aesimc %xmm0,%xmm0
4219 pxor %xmm1,%xmm1
4220 $movkey %xmm0,($inp)
4221 pxor %xmm0,%xmm0
4222 .Ldec_key_ret:
4223 add \$8,%rsp
4224 ret
4225 .LSEH_end_set_decrypt_key:
4226 .size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
4227 ___
4228 \f
4229 # This is based on a submission by
4230 #
4231 # Huang Ying <ying.huang@intel.com>
4232 # Vinodh Gopal <vinodh.gopal@intel.com>
4233 # Kahraman Akdemir
4234 #
4235 # Aggressively optimized with respect to aeskeygenassist's critical path,
4236 # and confined to %xmm0-5 to meet the Win64 ABI requirement.
4237 #
4238 # int ${PREFIX}_set_encrypt_key(const unsigned char *inp,
4239 # int bits, AES_KEY * const key);
4240 #
4241 # input: $inp user-supplied key
4242 # $bits $inp length in bits
4243 # $key pointer to key schedule
4244 # output:	%eax	0 on success, -1 or -2 on failure (see C)
4245 # $bits rounds-1 (used in aesni_set_decrypt_key)
4246 # *$key key schedule
4247 # $key pointer to key schedule (used in
4248 # aesni_set_decrypt_key)
4249 #
4250 # The subroutine is frameless, which means that only volatile registers
4251 # are used. Note that it's declared "abi-omnipotent", which means that
4252 # the set of volatile registers is smaller on Windows.
4253 #
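#
# A minimal, hypothetical C caller (sketch only; AES_KEY comes from
# OpenSSL's <openssl/aes.h>, the prototype is the one documented above and
# is normally internal to libcrypto, and demo()/load_key() are illustrative
# names):
#
#	#include <openssl/aes.h>
#
#	int aesni_set_encrypt_key(const unsigned char *inp, int bits,
#	                          AES_KEY *key);
#	int load_key(unsigned char buf[16]);	/* caller-provided key source */
#
#	int demo(void)
#	{
#		unsigned char user_key[16];
#		AES_KEY ek;
#		if (!load_key(user_key))
#			return -1;
#		/* 0 on success, -1 for NULL arguments, -2 for bad key length */
#		return aesni_set_encrypt_key(user_key, 128, &ek);
#	}
#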
4254 $code.=<<___;
4255 .globl ${PREFIX}_set_encrypt_key
4256 .type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
4257 .align 16
4258 ${PREFIX}_set_encrypt_key:
4259 __aesni_set_encrypt_key:
4260 .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
4261 mov \$-1,%rax
4262 test $inp,$inp
4263 jz .Lenc_key_ret
4264 test $key,$key
4265 jz .Lenc_key_ret
4266
4267 mov \$`1<<28|1<<11`,%r10d # AVX and XOP bits
4268 movups ($inp),%xmm0 # pull first 128 bits of *userKey
4269 xorps %xmm4,%xmm4 # low dword of xmm4 is assumed 0
4270 and OPENSSL_ia32cap_P+4(%rip),%r10d
4271 lea 16($key),%rax # %rax is used as modifiable copy of $key
4272 cmp \$256,$bits
4273 je .L14rounds
4274 cmp \$192,$bits
4275 je .L12rounds
4276 cmp \$128,$bits
4277 jne .Lbad_keybits
4278
4279 .L10rounds:
4280 mov \$9,$bits # 10 rounds for 128-bit key
4281 	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
4282 je .L10rounds_alt
4283
4284 $movkey %xmm0,($key) # round 0
4285 aeskeygenassist \$0x1,%xmm0,%xmm1 # round 1
4286 call .Lkey_expansion_128_cold
4287 aeskeygenassist \$0x2,%xmm0,%xmm1 # round 2
4288 call .Lkey_expansion_128
4289 aeskeygenassist \$0x4,%xmm0,%xmm1 # round 3
4290 call .Lkey_expansion_128
4291 aeskeygenassist \$0x8,%xmm0,%xmm1 # round 4
4292 call .Lkey_expansion_128
4293 aeskeygenassist \$0x10,%xmm0,%xmm1 # round 5
4294 call .Lkey_expansion_128
4295 aeskeygenassist \$0x20,%xmm0,%xmm1 # round 6
4296 call .Lkey_expansion_128
4297 aeskeygenassist \$0x40,%xmm0,%xmm1 # round 7
4298 call .Lkey_expansion_128
4299 aeskeygenassist \$0x80,%xmm0,%xmm1 # round 8
4300 call .Lkey_expansion_128
4301 aeskeygenassist \$0x1b,%xmm0,%xmm1 # round 9
4302 call .Lkey_expansion_128
4303 aeskeygenassist \$0x36,%xmm0,%xmm1 # round 10
4304 call .Lkey_expansion_128
4305 $movkey %xmm0,(%rax)
4306 mov $bits,80(%rax) # 240(%rdx)
4307 xor %eax,%eax
4308 jmp .Lenc_key_ret
4309
4310 .align 16
4311 .L10rounds_alt:
4312 movdqa .Lkey_rotate(%rip),%xmm5
4313 mov \$8,%r10d
4314 movdqa .Lkey_rcon1(%rip),%xmm4
4315 movdqa %xmm0,%xmm2
4316 movdqu %xmm0,($key)
4317 jmp .Loop_key128
4318
4319 .align 16
4320 .Loop_key128:
4321 pshufb %xmm5,%xmm0
4322 aesenclast %xmm4,%xmm0
4323 pslld \$1,%xmm4
4324 lea 16(%rax),%rax
4325
4326 movdqa %xmm2,%xmm3
4327 pslldq \$4,%xmm2
4328 pxor %xmm2,%xmm3
4329 pslldq \$4,%xmm2
4330 pxor %xmm2,%xmm3
4331 pslldq \$4,%xmm2
4332 pxor %xmm3,%xmm2
4333
4334 pxor %xmm2,%xmm0
4335 movdqu %xmm0,-16(%rax)
4336 movdqa %xmm0,%xmm2
4337
4338 dec %r10d
4339 jnz .Loop_key128
4340
4341 movdqa .Lkey_rcon1b(%rip),%xmm4
4342
4343 pshufb %xmm5,%xmm0
4344 aesenclast %xmm4,%xmm0
4345 pslld \$1,%xmm4
4346
4347 movdqa %xmm2,%xmm3
4348 pslldq \$4,%xmm2
4349 pxor %xmm2,%xmm3
4350 pslldq \$4,%xmm2
4351 pxor %xmm2,%xmm3
4352 pslldq \$4,%xmm2
4353 pxor %xmm3,%xmm2
4354
4355 pxor %xmm2,%xmm0
4356 movdqu %xmm0,(%rax)
4357
4358 movdqa %xmm0,%xmm2
4359 pshufb %xmm5,%xmm0
4360 aesenclast %xmm4,%xmm0
4361
4362 movdqa %xmm2,%xmm3
4363 pslldq \$4,%xmm2
4364 pxor %xmm2,%xmm3
4365 pslldq \$4,%xmm2
4366 pxor %xmm2,%xmm3
4367 pslldq \$4,%xmm2
4368 pxor %xmm3,%xmm2
4369
4370 pxor %xmm2,%xmm0
4371 movdqu %xmm0,16(%rax)
4372
4373 mov $bits,96(%rax) # 240($key)
4374 xor %eax,%eax
4375 jmp .Lenc_key_ret
4376
4377 .align 16
4378 .L12rounds:
4379 movq 16($inp),%xmm2 # remaining 1/3 of *userKey
4380 mov \$11,$bits # 12 rounds for 192
4381 cmp \$`1<<28`,%r10d # AVX, but no XOP
4382 je .L12rounds_alt
4383
4384 $movkey %xmm0,($key) # round 0
4385 aeskeygenassist \$0x1,%xmm2,%xmm1 # round 1,2
4386 call .Lkey_expansion_192a_cold
4387 aeskeygenassist \$0x2,%xmm2,%xmm1 # round 2,3
4388 call .Lkey_expansion_192b
4389 aeskeygenassist \$0x4,%xmm2,%xmm1 # round 4,5
4390 call .Lkey_expansion_192a
4391 aeskeygenassist \$0x8,%xmm2,%xmm1 # round 5,6
4392 call .Lkey_expansion_192b
4393 aeskeygenassist \$0x10,%xmm2,%xmm1 # round 7,8
4394 call .Lkey_expansion_192a
4395 aeskeygenassist \$0x20,%xmm2,%xmm1 # round 8,9
4396 call .Lkey_expansion_192b
4397 aeskeygenassist \$0x40,%xmm2,%xmm1 # round 10,11
4398 call .Lkey_expansion_192a
4399 aeskeygenassist \$0x80,%xmm2,%xmm1 # round 11,12
4400 call .Lkey_expansion_192b
4401 $movkey %xmm0,(%rax)
4402 mov $bits,48(%rax) # 240(%rdx)
4403 xor %rax, %rax
4404 jmp .Lenc_key_ret
4405
4406 .align 16
4407 .L12rounds_alt:
4408 movdqa .Lkey_rotate192(%rip),%xmm5
4409 movdqa .Lkey_rcon1(%rip),%xmm4
4410 mov \$8,%r10d
4411 movdqu %xmm0,($key)
4412 jmp .Loop_key192
4413
4414 .align 16
4415 .Loop_key192:
4416 movq %xmm2,0(%rax)
4417 movdqa %xmm2,%xmm1
4418 pshufb %xmm5,%xmm2
4419 aesenclast %xmm4,%xmm2
4420 pslld \$1, %xmm4
4421 lea 24(%rax),%rax
4422
4423 movdqa %xmm0,%xmm3
4424 pslldq \$4,%xmm0
4425 pxor %xmm0,%xmm3
4426 pslldq \$4,%xmm0
4427 pxor %xmm0,%xmm3
4428 pslldq \$4,%xmm0
4429 pxor %xmm3,%xmm0
4430
4431 pshufd \$0xff,%xmm0,%xmm3
4432 pxor %xmm1,%xmm3
4433 pslldq \$4,%xmm1
4434 pxor %xmm1,%xmm3
4435
4436 pxor %xmm2,%xmm0
4437 pxor %xmm3,%xmm2
4438 movdqu %xmm0,-16(%rax)
4439
4440 dec %r10d
4441 jnz .Loop_key192
4442
4443 mov $bits,32(%rax) # 240($key)
4444 xor %eax,%eax
4445 jmp .Lenc_key_ret
4446
4447 .align 16
4448 .L14rounds:
4449 	movups	16($inp),%xmm2			# remaining half of *userKey
4450 mov \$13,$bits # 14 rounds for 256
4451 lea 16(%rax),%rax
4452 cmp \$`1<<28`,%r10d # AVX, but no XOP
4453 je .L14rounds_alt
4454
4455 $movkey %xmm0,($key) # round 0
4456 $movkey %xmm2,16($key) # round 1
4457 aeskeygenassist \$0x1,%xmm2,%xmm1 # round 2
4458 call .Lkey_expansion_256a_cold
4459 aeskeygenassist \$0x1,%xmm0,%xmm1 # round 3
4460 call .Lkey_expansion_256b
4461 aeskeygenassist \$0x2,%xmm2,%xmm1 # round 4
4462 call .Lkey_expansion_256a
4463 aeskeygenassist \$0x2,%xmm0,%xmm1 # round 5
4464 call .Lkey_expansion_256b
4465 aeskeygenassist \$0x4,%xmm2,%xmm1 # round 6
4466 call .Lkey_expansion_256a
4467 aeskeygenassist \$0x4,%xmm0,%xmm1 # round 7
4468 call .Lkey_expansion_256b
4469 aeskeygenassist \$0x8,%xmm2,%xmm1 # round 8
4470 call .Lkey_expansion_256a
4471 aeskeygenassist \$0x8,%xmm0,%xmm1 # round 9
4472 call .Lkey_expansion_256b
4473 aeskeygenassist \$0x10,%xmm2,%xmm1 # round 10
4474 call .Lkey_expansion_256a
4475 aeskeygenassist \$0x10,%xmm0,%xmm1 # round 11
4476 call .Lkey_expansion_256b
4477 aeskeygenassist \$0x20,%xmm2,%xmm1 # round 12
4478 call .Lkey_expansion_256a
4479 aeskeygenassist \$0x20,%xmm0,%xmm1 # round 13
4480 call .Lkey_expansion_256b
4481 aeskeygenassist \$0x40,%xmm2,%xmm1 # round 14
4482 call .Lkey_expansion_256a
4483 $movkey %xmm0,(%rax)
4484 mov $bits,16(%rax) # 240(%rdx)
4485 xor %rax,%rax
4486 jmp .Lenc_key_ret
4487
4488 .align 16
4489 .L14rounds_alt:
4490 movdqa .Lkey_rotate(%rip),%xmm5
4491 movdqa .Lkey_rcon1(%rip),%xmm4
4492 mov \$7,%r10d
4493 movdqu %xmm0,0($key)
4494 movdqa %xmm2,%xmm1
4495 movdqu %xmm2,16($key)
4496 jmp .Loop_key256
4497
4498 .align 16
4499 .Loop_key256:
4500 pshufb %xmm5,%xmm2
4501 aesenclast %xmm4,%xmm2
4502
4503 movdqa %xmm0,%xmm3
4504 pslldq \$4,%xmm0
4505 pxor %xmm0,%xmm3
4506 pslldq \$4,%xmm0
4507 pxor %xmm0,%xmm3
4508 pslldq \$4,%xmm0
4509 pxor %xmm3,%xmm0
4510 pslld \$1,%xmm4
4511
4512 pxor %xmm2,%xmm0
4513 movdqu %xmm0,(%rax)
4514
4515 dec %r10d
4516 jz .Ldone_key256
4517
4518 pshufd \$0xff,%xmm0,%xmm2
4519 pxor %xmm3,%xmm3
4520 aesenclast %xmm3,%xmm2
4521
4522 movdqa %xmm1,%xmm3
4523 pslldq \$4,%xmm1
4524 pxor %xmm1,%xmm3
4525 pslldq \$4,%xmm1
4526 pxor %xmm1,%xmm3
4527 pslldq \$4,%xmm1
4528 pxor %xmm3,%xmm1
4529
4530 pxor %xmm1,%xmm2
4531 movdqu %xmm2,16(%rax)
4532 lea 32(%rax),%rax
4533 movdqa %xmm2,%xmm1
4534
4535 jmp .Loop_key256
4536
4537 .Ldone_key256:
4538 mov $bits,16(%rax) # 240($key)
4539 xor %eax,%eax
4540 jmp .Lenc_key_ret
4541
4542 .align 16
4543 .Lbad_keybits:
4544 mov \$-2,%rax
4545 .Lenc_key_ret:
4546 pxor %xmm0,%xmm0
4547 pxor %xmm1,%xmm1
4548 pxor %xmm2,%xmm2
4549 pxor %xmm3,%xmm3
4550 pxor %xmm4,%xmm4
4551 pxor %xmm5,%xmm5
4552 add \$8,%rsp
4553 ret
4554 .LSEH_end_set_encrypt_key:
4555 \f
4556 .align 16
4557 .Lkey_expansion_128:
4558 $movkey %xmm0,(%rax)
4559 lea 16(%rax),%rax
4560 .Lkey_expansion_128_cold:
4561 shufps \$0b00010000,%xmm0,%xmm4
4562 xorps %xmm4, %xmm0
4563 shufps \$0b10001100,%xmm0,%xmm4
4564 xorps %xmm4, %xmm0
4565 shufps \$0b11111111,%xmm1,%xmm1 # critical path
4566 xorps %xmm1,%xmm0
4567 ret
4568
4569 .align 16
4570 .Lkey_expansion_192a:
4571 $movkey %xmm0,(%rax)
4572 lea 16(%rax),%rax
4573 .Lkey_expansion_192a_cold:
4574 movaps %xmm2, %xmm5
4575 .Lkey_expansion_192b_warm:
4576 shufps \$0b00010000,%xmm0,%xmm4
4577 movdqa %xmm2,%xmm3
4578 xorps %xmm4,%xmm0
4579 shufps \$0b10001100,%xmm0,%xmm4
4580 pslldq \$4,%xmm3
4581 xorps %xmm4,%xmm0
4582 pshufd \$0b01010101,%xmm1,%xmm1 # critical path
4583 pxor %xmm3,%xmm2
4584 pxor %xmm1,%xmm0
4585 pshufd \$0b11111111,%xmm0,%xmm3
4586 pxor %xmm3,%xmm2
4587 ret
4588
4589 .align 16
4590 .Lkey_expansion_192b:
4591 movaps %xmm0,%xmm3
4592 shufps \$0b01000100,%xmm0,%xmm5
4593 $movkey %xmm5,(%rax)
4594 shufps \$0b01001110,%xmm2,%xmm3
4595 $movkey %xmm3,16(%rax)
4596 lea 32(%rax),%rax
4597 jmp .Lkey_expansion_192b_warm
4598
4599 .align 16
4600 .Lkey_expansion_256a:
4601 $movkey %xmm2,(%rax)
4602 lea 16(%rax),%rax
4603 .Lkey_expansion_256a_cold:
4604 shufps \$0b00010000,%xmm0,%xmm4
4605 xorps %xmm4,%xmm0
4606 shufps \$0b10001100,%xmm0,%xmm4
4607 xorps %xmm4,%xmm0
4608 shufps \$0b11111111,%xmm1,%xmm1 # critical path
4609 xorps %xmm1,%xmm0
4610 ret
4611
4612 .align 16
4613 .Lkey_expansion_256b:
4614 $movkey %xmm0,(%rax)
4615 lea 16(%rax),%rax
4616
4617 shufps \$0b00010000,%xmm2,%xmm4
4618 xorps %xmm4,%xmm2
4619 shufps \$0b10001100,%xmm2,%xmm4
4620 xorps %xmm4,%xmm2
4621 shufps \$0b10101010,%xmm1,%xmm1 # critical path
4622 xorps %xmm1,%xmm2
4623 ret
4624 .size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
4625 .size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
4626 ___
4627 }
4628 \f
4629 $code.=<<___;
4630 .align 64
4631 .Lbswap_mask:
4632 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
4633 .Lincrement32:
4634 .long 6,6,6,0
4635 .Lincrement64:
4636 .long 1,0,0,0
4637 .Lxts_magic:
4638 .long 0x87,0,1,0
4639 .Lincrement1:
4640 .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
4641 .Lkey_rotate:
4642 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
4643 .Lkey_rotate192:
4644 .long 0x04070605,0x04070605,0x04070605,0x04070605
4645 .Lkey_rcon1:
4646 .long 1,1,1,1
4647 .Lkey_rcon1b:
4648 .long 0x1b,0x1b,0x1b,0x1b
4649
4650 .asciz "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
4651 .align 64
4652 ___
4653
4654 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
4655 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
4656 if ($win64) {
4657 $rec="%rcx";
4658 $frame="%rdx";
4659 $context="%r8";
4660 $disp="%r9";
4661
4662 $code.=<<___;
4663 .extern __imp_RtlVirtualUnwind
4664 ___
4665 $code.=<<___ if ($PREFIX eq "aesni");
4666 .type ecb_ccm64_se_handler,\@abi-omnipotent
4667 .align 16
4668 ecb_ccm64_se_handler:
4669 push %rsi
4670 push %rdi
4671 push %rbx
4672 push %rbp
4673 push %r12
4674 push %r13
4675 push %r14
4676 push %r15
4677 pushfq
4678 sub \$64,%rsp
4679
4680 mov 120($context),%rax # pull context->Rax
4681 mov 248($context),%rbx # pull context->Rip
4682
4683 mov 8($disp),%rsi # disp->ImageBase
4684 mov 56($disp),%r11 # disp->HandlerData
4685
4686 mov 0(%r11),%r10d # HandlerData[0]
4687 lea (%rsi,%r10),%r10 # prologue label
4688 cmp %r10,%rbx # context->Rip<prologue label
4689 jb .Lcommon_seh_tail
4690
4691 mov 152($context),%rax # pull context->Rsp
4692
4693 mov 4(%r11),%r10d # HandlerData[1]
4694 lea (%rsi,%r10),%r10 # epilogue label
4695 cmp %r10,%rbx # context->Rip>=epilogue label
4696 jae .Lcommon_seh_tail
4697
4698 lea 0(%rax),%rsi # %xmm save area
4699 lea 512($context),%rdi # &context.Xmm6
4700 mov \$8,%ecx # 4*sizeof(%xmm0)/sizeof(%rax)
4701 .long 0xa548f3fc # cld; rep movsq
4702 lea 0x58(%rax),%rax # adjust stack pointer
4703
4704 jmp .Lcommon_seh_tail
4705 .size ecb_ccm64_se_handler,.-ecb_ccm64_se_handler
4706
4707 .type ctr_xts_se_handler,\@abi-omnipotent
4708 .align 16
4709 ctr_xts_se_handler:
4710 push %rsi
4711 push %rdi
4712 push %rbx
4713 push %rbp
4714 push %r12
4715 push %r13
4716 push %r14
4717 push %r15
4718 pushfq
4719 sub \$64,%rsp
4720
4721 mov 120($context),%rax # pull context->Rax
4722 mov 248($context),%rbx # pull context->Rip
4723
4724 mov 8($disp),%rsi # disp->ImageBase
4725 mov 56($disp),%r11 # disp->HandlerData
4726
4727 mov 0(%r11),%r10d # HandlerData[0]
4728 	lea	(%rsi,%r10),%r10	# prologue label
4729 cmp %r10,%rbx # context->Rip<prologue label
4730 jb .Lcommon_seh_tail
4731
4732 mov 152($context),%rax # pull context->Rsp
4733
4734 mov 4(%r11),%r10d # HandlerData[1]
4735 lea (%rsi,%r10),%r10 # epilogue label
4736 cmp %r10,%rbx # context->Rip>=epilogue label
4737 jae .Lcommon_seh_tail
4738
4739 mov 160($context),%rax # pull context->Rbp
4740 lea -0xa0(%rax),%rsi # %xmm save area
4741 lea 512($context),%rdi # & context.Xmm6
4742 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
4743 .long 0xa548f3fc # cld; rep movsq
4744
4745 jmp .Lcommon_rbp_tail
4746 .size ctr_xts_se_handler,.-ctr_xts_se_handler
4747
4748 .type ocb_se_handler,\@abi-omnipotent
4749 .align 16
4750 ocb_se_handler:
4751 push %rsi
4752 push %rdi
4753 push %rbx
4754 push %rbp
4755 push %r12
4756 push %r13
4757 push %r14
4758 push %r15
4759 pushfq
4760 sub \$64,%rsp
4761
4762 mov 120($context),%rax # pull context->Rax
4763 mov 248($context),%rbx # pull context->Rip
4764
4765 mov 8($disp),%rsi # disp->ImageBase
4766 mov 56($disp),%r11 # disp->HandlerData
4767
4768 mov 0(%r11),%r10d # HandlerData[0]
4769 	lea	(%rsi,%r10),%r10	# prologue label
4770 cmp %r10,%rbx # context->Rip<prologue label
4771 jb .Lcommon_seh_tail
4772
4773 mov 4(%r11),%r10d # HandlerData[1]
4774 lea (%rsi,%r10),%r10 # epilogue label
4775 cmp %r10,%rbx # context->Rip>=epilogue label
4776 jae .Lcommon_seh_tail
4777
4778 mov 8(%r11),%r10d # HandlerData[2]
4779 lea (%rsi,%r10),%r10
4780 cmp %r10,%rbx # context->Rip>=pop label
4781 jae .Locb_no_xmm
4782
4783 mov 152($context),%rax # pull context->Rsp
4784
4785 lea (%rax),%rsi # %xmm save area
4786 lea 512($context),%rdi # & context.Xmm6
4787 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
4788 .long 0xa548f3fc # cld; rep movsq
4789 lea 0xa0+0x28(%rax),%rax
4790
4791 .Locb_no_xmm:
4792 mov -8(%rax),%rbx
4793 mov -16(%rax),%rbp
4794 mov -24(%rax),%r12
4795 mov -32(%rax),%r13
4796 mov -40(%rax),%r14
4797
4798 mov %rbx,144($context) # restore context->Rbx
4799 mov %rbp,160($context) # restore context->Rbp
4800 mov %r12,216($context) # restore context->R12
4801 mov %r13,224($context) # restore context->R13
4802 mov %r14,232($context) # restore context->R14
4803
4804 jmp .Lcommon_seh_tail
4805 .size ocb_se_handler,.-ocb_se_handler
4806 ___
4807 $code.=<<___;
4808 .type cbc_se_handler,\@abi-omnipotent
4809 .align 16
4810 cbc_se_handler:
4811 push %rsi
4812 push %rdi
4813 push %rbx
4814 push %rbp
4815 push %r12
4816 push %r13
4817 push %r14
4818 push %r15
4819 pushfq
4820 sub \$64,%rsp
4821
4822 mov 152($context),%rax # pull context->Rsp
4823 mov 248($context),%rbx # pull context->Rip
4824
4825 lea .Lcbc_decrypt_bulk(%rip),%r10
4826 cmp %r10,%rbx # context->Rip<"prologue" label
4827 jb .Lcommon_seh_tail
4828
4829 lea .Lcbc_decrypt_body(%rip),%r10
4830 cmp %r10,%rbx # context->Rip<cbc_decrypt_body
4831 jb .Lrestore_cbc_rax
4832
4833 lea .Lcbc_ret(%rip),%r10
4834 cmp %r10,%rbx # context->Rip>="epilogue" label
4835 jae .Lcommon_seh_tail
4836
4837 lea 16(%rax),%rsi # %xmm save area
4838 lea 512($context),%rdi # &context.Xmm6
4839 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
4840 .long 0xa548f3fc # cld; rep movsq
4841
4842 .Lcommon_rbp_tail:
4843 mov 160($context),%rax # pull context->Rbp
4844 mov (%rax),%rbp # restore saved %rbp
4845 lea 8(%rax),%rax # adjust stack pointer
4846 mov %rbp,160($context) # restore context->Rbp
4847 jmp .Lcommon_seh_tail
4848
4849 .Lrestore_cbc_rax:
4850 mov 120($context),%rax
4851
4852 .Lcommon_seh_tail:
4853 mov 8(%rax),%rdi
4854 mov 16(%rax),%rsi
4855 mov %rax,152($context) # restore context->Rsp
4856 mov %rsi,168($context) # restore context->Rsi
4857 mov %rdi,176($context) # restore context->Rdi
4858
4859 mov 40($disp),%rdi # disp->ContextRecord
4860 mov $context,%rsi # context
4861 mov \$154,%ecx # sizeof(CONTEXT)
4862 .long 0xa548f3fc # cld; rep movsq
4863
4864 mov $disp,%rsi
4865 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
4866 mov 8(%rsi),%rdx # arg2, disp->ImageBase
4867 mov 0(%rsi),%r8 # arg3, disp->ControlPc
4868 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
4869 mov 40(%rsi),%r10 # disp->ContextRecord
4870 lea 56(%rsi),%r11 # &disp->HandlerData
4871 lea 24(%rsi),%r12 # &disp->EstablisherFrame
4872 mov %r10,32(%rsp) # arg5
4873 mov %r11,40(%rsp) # arg6
4874 mov %r12,48(%rsp) # arg7
4875 mov %rcx,56(%rsp) # arg8, (NULL)
4876 call *__imp_RtlVirtualUnwind(%rip)
4877
4878 mov \$1,%eax # ExceptionContinueSearch
4879 add \$64,%rsp
4880 popfq
4881 pop %r15
4882 pop %r14
4883 pop %r13
4884 pop %r12
4885 pop %rbp
4886 pop %rbx
4887 pop %rdi
4888 pop %rsi
4889 ret
4890 .size cbc_se_handler,.-cbc_se_handler
4891
4892 .section .pdata
4893 .align 4
4894 ___
4895 $code.=<<___ if ($PREFIX eq "aesni");
4896 .rva .LSEH_begin_aesni_ecb_encrypt
4897 .rva .LSEH_end_aesni_ecb_encrypt
4898 .rva .LSEH_info_ecb
4899
4900 .rva .LSEH_begin_aesni_ccm64_encrypt_blocks
4901 .rva .LSEH_end_aesni_ccm64_encrypt_blocks
4902 .rva .LSEH_info_ccm64_enc
4903
4904 .rva .LSEH_begin_aesni_ccm64_decrypt_blocks
4905 .rva .LSEH_end_aesni_ccm64_decrypt_blocks
4906 .rva .LSEH_info_ccm64_dec
4907
4908 .rva .LSEH_begin_aesni_ctr32_encrypt_blocks
4909 .rva .LSEH_end_aesni_ctr32_encrypt_blocks
4910 .rva .LSEH_info_ctr32
4911
4912 .rva .LSEH_begin_aesni_xts_encrypt
4913 .rva .LSEH_end_aesni_xts_encrypt
4914 .rva .LSEH_info_xts_enc
4915
4916 .rva .LSEH_begin_aesni_xts_decrypt
4917 .rva .LSEH_end_aesni_xts_decrypt
4918 .rva .LSEH_info_xts_dec
4919
4920 .rva .LSEH_begin_aesni_ocb_encrypt
4921 .rva .LSEH_end_aesni_ocb_encrypt
4922 .rva .LSEH_info_ocb_enc
4923
4924 .rva .LSEH_begin_aesni_ocb_decrypt
4925 .rva .LSEH_end_aesni_ocb_decrypt
4926 .rva .LSEH_info_ocb_dec
4927 ___
4928 $code.=<<___;
4929 .rva .LSEH_begin_${PREFIX}_cbc_encrypt
4930 .rva .LSEH_end_${PREFIX}_cbc_encrypt
4931 .rva .LSEH_info_cbc
4932
4933 .rva ${PREFIX}_set_decrypt_key
4934 .rva .LSEH_end_set_decrypt_key
4935 .rva .LSEH_info_key
4936
4937 .rva ${PREFIX}_set_encrypt_key
4938 .rva .LSEH_end_set_encrypt_key
4939 .rva .LSEH_info_key
4940 .section .xdata
4941 .align 8
4942 ___
4943 $code.=<<___ if ($PREFIX eq "aesni");
4944 .LSEH_info_ecb:
4945 .byte 9,0,0,0
4946 .rva ecb_ccm64_se_handler
4947 .rva .Lecb_enc_body,.Lecb_enc_ret # HandlerData[]
4948 .LSEH_info_ccm64_enc:
4949 .byte 9,0,0,0
4950 .rva ecb_ccm64_se_handler
4951 .rva .Lccm64_enc_body,.Lccm64_enc_ret # HandlerData[]
4952 .LSEH_info_ccm64_dec:
4953 .byte 9,0,0,0
4954 .rva ecb_ccm64_se_handler
4955 .rva .Lccm64_dec_body,.Lccm64_dec_ret # HandlerData[]
4956 .LSEH_info_ctr32:
4957 .byte 9,0,0,0
4958 .rva ctr_xts_se_handler
4959 .rva .Lctr32_body,.Lctr32_epilogue # HandlerData[]
4960 .LSEH_info_xts_enc:
4961 .byte 9,0,0,0
4962 .rva ctr_xts_se_handler
4963 .rva .Lxts_enc_body,.Lxts_enc_epilogue # HandlerData[]
4964 .LSEH_info_xts_dec:
4965 .byte 9,0,0,0
4966 .rva ctr_xts_se_handler
4967 .rva .Lxts_dec_body,.Lxts_dec_epilogue # HandlerData[]
4968 .LSEH_info_ocb_enc:
4969 .byte 9,0,0,0
4970 .rva ocb_se_handler
4971 .rva .Locb_enc_body,.Locb_enc_epilogue # HandlerData[]
4972 .rva .Locb_enc_pop
4973 .long 0
4974 .LSEH_info_ocb_dec:
4975 .byte 9,0,0,0
4976 .rva ocb_se_handler
4977 .rva .Locb_dec_body,.Locb_dec_epilogue # HandlerData[]
4978 .rva .Locb_dec_pop
4979 .long 0
4980 ___
4981 $code.=<<___;
4982 .LSEH_info_cbc:
4983 .byte 9,0,0,0
4984 .rva cbc_se_handler
4985 .LSEH_info_key:
4986 .byte 0x01,0x04,0x01,0x00
4987 .byte 0x04,0x02,0x00,0x00 # sub rsp,8
4988 ___
4989 }
4990
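# Helper for the .byte translator below: append a REX prefix to the opcode
# byte list when either xmm operand is %xmm8..%xmm15 (REX.R covers the
# destination register, REX.B the source).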
4991 sub rex {
4992 local *opcode=shift;
4993 my ($dst,$src)=@_;
4994 my $rex=0;
4995
4996 $rex|=0x04 if($dst>=8);
4997 $rex|=0x01 if($src>=8);
4998 push @opcode,$rex|0x40 if($rex);
4999 }
5000
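# Translate AES-NI mnemonics into explicit .byte sequences so the module can
# still be assembled by toolchains that predate these instructions. Three
# forms are handled: aeskeygenassist with an immediate, register-to-register
# aes* operations, and aes* operations with a disp8(%rsp) memory operand;
# any other line is returned unchanged.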
5001 sub aesni {
5002 my $line=shift;
5003 my @opcode=(0x66);
5004
5005 if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
5006 rex(\@opcode,$4,$3);
5007 push @opcode,0x0f,0x3a,0xdf;
5008 push @opcode,0xc0|($3&7)|(($4&7)<<3); # ModR/M
5009 my $c=$2;
5010 push @opcode,$c=~/^0/?oct($c):$c;
5011 return ".byte\t".join(',',@opcode);
5012 }
5013 elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
5014 my %opcodelet = (
5015 "aesimc" => 0xdb,
5016 "aesenc" => 0xdc, "aesenclast" => 0xdd,
5017 "aesdec" => 0xde, "aesdeclast" => 0xdf
5018 );
5019 return undef if (!defined($opcodelet{$1}));
5020 rex(\@opcode,$3,$2);
5021 push @opcode,0x0f,0x38,$opcodelet{$1};
5022 push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
5023 return ".byte\t".join(',',@opcode);
5024 }
5025 elsif ($line=~/(aes[a-z]+)\s+([0x1-9a-fA-F]*)\(%rsp\),\s*%xmm([0-9]+)/) {
5026 my %opcodelet = (
5027 "aesenc" => 0xdc, "aesenclast" => 0xdd,
5028 "aesdec" => 0xde, "aesdeclast" => 0xdf
5029 );
5030 return undef if (!defined($opcodelet{$1}));
5031 my $off = $2;
5032 push @opcode,0x44 if ($3>=8);
5033 push @opcode,0x0f,0x38,$opcodelet{$1};
5034 push @opcode,0x44|(($3&7)<<3),0x24; # ModR/M
5035 push @opcode,($off=~/^0/?oct($off):$off)&0xff;
5036 return ".byte\t".join(',',@opcode);
5037 }
5038 return $line;
5039 }
5040
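# Likewise encode "movbe %eax,disp8(%rsp)" as explicit bytes for assemblers
# that do not know the instruction.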
5041 sub movbe {
5042 ".byte 0x0f,0x38,0xf1,0x44,0x24,".shift;
5043 }
5044
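# Post-process the accumulated code: evaluate `...` expressions, then rewrite
# AES-NI and MOVBE mnemonics as .byte sequences via the helpers above.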
5045 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
5046 $code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
5047 #$code =~ s/\bmovbe\s+%eax/bswap %eax; mov %eax/gm; # debugging artefact
5048 $code =~ s/\bmovbe\s+%eax,\s*([0-9]+)\(%rsp\)/movbe($1)/gem;
5049
5050 print $code;
5051
5052 close STDOUT;