#! /usr/bin/env perl
# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# March 2016
#
# Initial support for Fujitsu SPARC64 X/X+ comprises minimally
# required key setup and single-block procedures.
#
# April 2016
#
# Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
# that the parallelizable nature of CBC decrypt and CTR is not utilized
# yet. CBC encrypt, on the other hand, is as good as it can possibly
# get, processing one byte in 4.1 cycles with a 128-bit key on SPARC64 X.
# This is ~6x faster than a pure software implementation...
#
# July 2016
#
# Switch from faligndata to fshiftorx, which allows us to omit alignaddr
# instructions and improves single-block and short-input performance
# with misaligned data.
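#
# To illustrate the realignment idea: fshiftorx combines two aligned
# 8-byte loads into one properly aligned doubleword, with the shift
# amounts preloaded from a small table indexed by the input's
# misalignment (see .Linp_align below). A minimal sketch of the
# equivalent integer arithmetic, assuming a big-endian 64-bit view of
# two words $w0 and $w1 that straddle the block, and 0 < $misalign < 8:
#
#	my $left  = $misalign * 8;                    # bits consumed in $w0
#	my $block = (($w0 << $left) | ($w1 >> (64 - $left)))
#	            & 0xffff_ffff_ffff_ffff;          # straddling doubleword
#
# so no per-access alignaddr setup is required once the shift
# parameters are loaded.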

$output = pop and open STDOUT,">$output";

{
my ($inp,$out,$key,$rounds,$tmp,$mask) = map("%o$_",(0..5));

$code.=<<___;
#include "sparc_arch.h"

#define LOCALS (STACK_BIAS+STACK_FRAME)

.text

.globl aes_fx_encrypt
.align 32
aes_fx_encrypt:
	and $inp, 7, $tmp		! is input aligned?
	andn $inp, 7, $inp
	ldd [$key + 0], %f6		! round[0]
	ldd [$key + 8], %f8
	mov %o7, %g1
	ld [$key + 240], $rounds

1:	call .+8
	add %o7, .Linp_align-1b, %o7

	sll $tmp, 3, $tmp
	ldd [$inp + 0], %f0		! load input
	brz,pt $tmp, .Lenc_inp_aligned
	ldd [$inp + 8], %f2

	ldd [%o7 + $tmp], %f14		! shift left params
	ldd [$inp + 16], %f4
	fshiftorx %f0, %f2, %f14, %f0
	fshiftorx %f2, %f4, %f14, %f2

.Lenc_inp_aligned:
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fxor %f0, %f6, %f0		! ^=round[0]
	fxor %f2, %f8, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8
	add $key, 32, $key
	sub $rounds, 4, $rounds

.Loop_enc:
	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 16], %f10
	ldd [$key + 24], %f12
	add $key, 32, $key

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$key + 0], %f6
	ldd [$key + 8], %f8

	brnz,a $rounds, .Loop_enc
	sub $rounds, 2, $rounds

	andcc $out, 7, $tmp		! is output aligned?
	andn $out, 7, $out
	mov 0xff, $mask
	srl $mask, $tmp, $mask
	add %o7, 64, %o7
	sll $tmp, 3, $tmp

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [%o7 + $tmp], %f14		! shift right params

	fmovd %f0, %f4
	faesenclx %f2, %f6, %f0
	faesenclx %f4, %f8, %f2

	bnz,pn %icc, .Lenc_out_unaligned
	mov %g1, %o7

	std %f0, [$out + 0]
	retl
	std %f2, [$out + 8]

.align 16
.Lenc_out_unaligned:
	add $out, 16, $inp
	orn %g0, $mask, $tmp
	fshiftorx %f0, %f0, %f14, %f4
	fshiftorx %f0, %f2, %f14, %f6
	fshiftorx %f2, %f2, %f14, %f8

	stda %f4, [$out + $mask]0xc0	! partial store
	std %f6, [$out + 8]
	stda %f8, [$inp + $tmp]0xc0	! partial store
	retl
	nop
.type aes_fx_encrypt,#function
.size aes_fx_encrypt,.-aes_fx_encrypt

.globl aes_fx_decrypt
.align 32
aes_fx_decrypt:
	and $inp, 7, $tmp		! is input aligned?
	andn $inp, 7, $inp
	ldd [$key + 0], %f6		! round[0]
	ldd [$key + 8], %f8
	mov %o7, %g1
	ld [$key + 240], $rounds

1:	call .+8
	add %o7, .Linp_align-1b, %o7

	sll $tmp, 3, $tmp
	ldd [$inp + 0], %f0		! load input
	brz,pt $tmp, .Ldec_inp_aligned
	ldd [$inp + 8], %f2

	ldd [%o7 + $tmp], %f14		! shift left params
	ldd [$inp + 16], %f4
	fshiftorx %f0, %f2, %f14, %f0
	fshiftorx %f2, %f4, %f14, %f2

.Ldec_inp_aligned:
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fxor %f0, %f6, %f0		! ^=round[0]
	fxor %f2, %f8, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8
	add $key, 32, $key
	sub $rounds, 4, $rounds

.Loop_dec:
	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$key + 16], %f10
	ldd [$key + 24], %f12
	add $key, 32, $key

	fmovd %f0, %f4
	faesdecx %f2, %f6, %f0
	faesdecx %f4, %f8, %f2
	ldd [$key + 0], %f6
	ldd [$key + 8], %f8

	brnz,a $rounds, .Loop_dec
	sub $rounds, 2, $rounds

	andcc $out, 7, $tmp		! is output aligned?
	andn $out, 7, $out
	mov 0xff, $mask
	srl $mask, $tmp, $mask
	add %o7, 64, %o7
	sll $tmp, 3, $tmp

	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [%o7 + $tmp], %f14		! shift right params

	fmovd %f0, %f4
	faesdeclx %f2, %f6, %f0
	faesdeclx %f4, %f8, %f2

	bnz,pn %icc, .Ldec_out_unaligned
	mov %g1, %o7

	std %f0, [$out + 0]
	retl
	std %f2, [$out + 8]

.align 16
.Ldec_out_unaligned:
	add $out, 16, $inp
	orn %g0, $mask, $tmp
	fshiftorx %f0, %f0, %f14, %f4
	fshiftorx %f0, %f2, %f14, %f6
	fshiftorx %f2, %f2, %f14, %f8

	stda %f4, [$out + $mask]0xc0	! partial store
	std %f6, [$out + 8]
	stda %f8, [$inp + $tmp]0xc0	! partial store
	retl
	nop
.type aes_fx_decrypt,#function
.size aes_fx_decrypt,.-aes_fx_decrypt
___
}
{
my ($inp,$bits,$out,$tmp,$inc) = map("%o$_",(0..5));
$code.=<<___;
.globl aes_fx_set_decrypt_key
.align 32
aes_fx_set_decrypt_key:
	b .Lset_encrypt_key
	mov -1, $inc
	retl
	nop
.type aes_fx_set_decrypt_key,#function
.size aes_fx_set_decrypt_key,.-aes_fx_set_decrypt_key

.globl aes_fx_set_encrypt_key
.align 32
aes_fx_set_encrypt_key:
	mov 1, $inc
	nop
.Lset_encrypt_key:
	and $inp, 7, $tmp
	andn $inp, 7, $inp
	sll $tmp, 3, $tmp
	mov %o7, %g1

1:	call .+8
	add %o7, .Linp_align-1b, %o7

	ldd [%o7 + $tmp], %f10		! shift left params
	mov %g1, %o7

	cmp $bits, 192
	ldd [$inp + 0], %f0
	bl,pt %icc, .L128
	ldd [$inp + 8], %f2

	be,pt %icc, .L192
	ldd [$inp + 16], %f4
	brz,pt $tmp, .L256aligned
	ldd [$inp + 24], %f6

	ldd [$inp + 32], %f8
	fshiftorx %f0, %f2, %f10, %f0
	fshiftorx %f2, %f4, %f10, %f2
	fshiftorx %f4, %f6, %f10, %f4
	fshiftorx %f6, %f8, %f10, %f6

.L256aligned:
	mov 14, $bits
	and $inc, `14*16`, $tmp
	st $bits, [$out + 240]		! store rounds
	add $out, $tmp, $out		! start or end of key schedule
	sllx $inc, 4, $inc		! 16 or -16
___
for ($i=0; $i<6; $i++) {
    $code.=<<___;
	std %f0, [$out + 0]
	faeskeyx %f6, `0x10+$i`, %f0
	std %f2, [$out + 8]
	add $out, $inc, $out
	faeskeyx %f0, 0x00, %f2
	std %f4, [$out + 0]
	faeskeyx %f2, 0x01, %f4
	std %f6, [$out + 8]
	add $out, $inc, $out
	faeskeyx %f4, 0x00, %f6
___
}
$code.=<<___;
	std %f0, [$out + 0]
	faeskeyx %f6, `0x10+$i`, %f0
	std %f2, [$out + 8]
	add $out, $inc, $out
	faeskeyx %f0, 0x00, %f2
	std %f4, [$out + 0]
	std %f6, [$out + 8]
	add $out, $inc, $out
	std %f0, [$out + 0]
	std %f2, [$out + 8]
	retl
	xor %o0, %o0, %o0		! return 0

.align 16
.L192:
	brz,pt $tmp, .L192aligned
	nop

	ldd [$inp + 24], %f6
	fshiftorx %f0, %f2, %f10, %f0
	fshiftorx %f2, %f4, %f10, %f2
	fshiftorx %f4, %f6, %f10, %f4

.L192aligned:
	mov 12, $bits
	and $inc, `12*16`, $tmp
	st $bits, [$out + 240]		! store rounds
	add $out, $tmp, $out		! start or end of key schedule
	sllx $inc, 4, $inc		! 16 or -16
___
for ($i=0; $i<8; $i+=2) {
    $code.=<<___;
	std %f0, [$out + 0]
	faeskeyx %f4, `0x10+$i`, %f0
	std %f2, [$out + 8]
	add $out, $inc, $out
	faeskeyx %f0, 0x00, %f2
	std %f4, [$out + 0]
	faeskeyx %f2, 0x00, %f4
	std %f0, [$out + 8]
	add $out, $inc, $out
	faeskeyx %f4, `0x10+$i+1`, %f0
	std %f2, [$out + 0]
	faeskeyx %f0, 0x00, %f2
	std %f4, [$out + 8]
	add $out, $inc, $out
___
$code.=<<___ if ($i<6);
	faeskeyx %f2, 0x00, %f4
___
}
$code.=<<___;
	std %f0, [$out + 0]
	std %f2, [$out + 8]
	retl
	xor %o0, %o0, %o0		! return 0

.align 16
.L128:
	brz,pt $tmp, .L128aligned
	nop

	ldd [$inp + 16], %f4
	fshiftorx %f0, %f2, %f10, %f0
	fshiftorx %f2, %f4, %f10, %f2

.L128aligned:
	mov 10, $bits
	and $inc, `10*16`, $tmp
	st $bits, [$out + 240]		! store rounds
	add $out, $tmp, $out		! start or end of key schedule
	sllx $inc, 4, $inc		! 16 or -16
___
for ($i=0; $i<10; $i++) {
    $code.=<<___;
	std %f0, [$out + 0]
	faeskeyx %f2, `0x10+$i`, %f0
	std %f2, [$out + 8]
	add $out, $inc, $out
	faeskeyx %f0, 0x00, %f2
___
}
$code.=<<___;
	std %f0, [$out + 0]
	std %f2, [$out + 8]
	retl
	xor %o0, %o0, %o0		! return 0
.type aes_fx_set_encrypt_key,#function
.size aes_fx_set_encrypt_key,.-aes_fx_set_encrypt_key
___
}
{
my ($inp,$out,$len,$key,$ivp,$dir) = map("%i$_",(0..5));
my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
my ($iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
    = map("%f$_",grep { !($_ & 1) } (16 .. 62));
my ($ileft,$iright) = ($ialign,$oalign);

$code.=<<___;
.globl aes_fx_cbc_encrypt
.align 32
aes_fx_cbc_encrypt:
	save %sp, -STACK_FRAME-16, %sp
	srln $len, 4, $len
	and $inp, 7, $ialign
	andn $inp, 7, $inp
	brz,pn $len, .Lcbc_no_data
	sll $ialign, 3, $ileft

1:	call .+8
	add %o7, .Linp_align-1b, %o7

	ld [$key + 240], $rounds
	and $out, 7, $oalign
	ld [$ivp + 0], %f0		! load ivec
	andn $out, 7, $out
	ld [$ivp + 4], %f1
	sll $oalign, 3, $mask
	ld [$ivp + 8], %f2
	ld [$ivp + 12], %f3

	sll $rounds, 4, $rounds
	add $rounds, $key, $end
	ldd [$key + 0], $r0hi		! round[0]
	ldd [$key + 8], $r0lo

	add $inp, 16, $inp
	sub $len, 1, $len
	ldd [$end + 0], $rlhi		! round[last]
	ldd [$end + 8], $rllo

	mov 16, $inc
	movrz $len, 0, $inc
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	ldd [%o7 + $ileft], $fshift	! shift left params
	add %o7, 64, %o7
	ldd [$inp - 16], $in0		! load input
	ldd [$inp - 8], $in1
	ldda [$inp]0x82, $intail	! non-faulting load
	brz $dir, .Lcbc_decrypt
	add $inp, $inc, $inp		! inp+=16

	fxor $r0hi, %f0, %f0		! ivec^=round[0]
	fxor $r0lo, %f2, %f2
	fshiftorx $in0, $in1, $fshift, $in0
	fshiftorx $in1, $intail, $fshift, $in1
	nop

.Loop_cbc_enc:
	fxor $in0, %f0, %f0		! inp^ivec^round[0]
	fxor $in1, %f2, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8
	add $key, 32, $end
	sub $rounds, 16*6, $inner

.Lcbc_enc:
	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10
	ldd [$end + 24], %f12
	add $end, 32, $end

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$end + 0], %f6
	ldd [$end + 8], %f8

	brnz,a $inner, .Lcbc_enc
	sub $inner, 16*2, $inner

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10		! round[last-1]
	ldd [$end + 24], %f12

	movrz $len, 0, $inc
	fmovd $intail, $in0
	ldd [$inp - 8], $in1		! load next input block
	ldda [$inp]0x82, $intail	! non-faulting load
	add $inp, $inc, $inp		! inp+=16

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2

	fshiftorx $in0, $in1, $fshift, $in0
	fshiftorx $in1, $intail, $fshift, $in1

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fxor $r0hi, $in0, $in0		! inp^=round[0]
	fxor $r0lo, $in1, $in1

	fmovd %f0, %f4
	faesenclx %f2, $rlhi, %f0
	faesenclx %f4, $rllo, %f2

	brnz,pn $oalign, .Lcbc_enc_unaligned_out
	nop

	std %f0, [$out + 0]
	std %f2, [$out + 8]
	add $out, 16, $out

	brnz,a $len, .Loop_cbc_enc
	sub $len, 1, $len

	st %f0, [$ivp + 0]		! output ivec
	st %f1, [$ivp + 4]
	st %f2, [$ivp + 8]
	st %f3, [$ivp + 12]

.Lcbc_no_data:
	ret
	restore

.align 32
.Lcbc_enc_unaligned_out:
	ldd [%o7 + $mask], $fshift	! shift right params
	mov 0xff, $mask
	srl $mask, $oalign, $mask
	sub %g0, $ileft, $iright

	fshiftorx %f0, %f0, $fshift, %f6
	fshiftorx %f0, %f2, $fshift, %f8

	stda %f6, [$out + $mask]0xc0	! partial store
	orn %g0, $mask, $mask
	std %f8, [$out + 8]
	add $out, 16, $out
	brz $len, .Lcbc_enc_unaligned_out_done
	sub $len, 1, $len
	b .Loop_cbc_enc_unaligned_out
	nop

.align 32
.Loop_cbc_enc_unaligned_out:
	fmovd %f2, $outhead
	fxor $in0, %f0, %f0		! inp^ivec^round[0]
	fxor $in1, %f2, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 48], %f10		! round[3]
	ldd [$key + 56], %f12

	ldx [$inp - 16], %o0
	ldx [$inp - 8], %o1
	brz $ileft, .Lcbc_enc_aligned_inp
	movrz $len, 0, $inc

	ldx [$inp], %o2
	sllx %o0, $ileft, %o0
	srlx %o1, $iright, %g1
	sllx %o1, $ileft, %o1
	or %g1, %o0, %o0
	srlx %o2, $iright, %o2
	or %o2, %o1, %o1

.Lcbc_enc_aligned_inp:
	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$key + 64], %f6		! round[4]
	ldd [$key + 72], %f8
	add $key, 64, $end
	sub $rounds, 16*8, $inner

	stx %o0, [%sp + LOCALS + 0]
	stx %o1, [%sp + LOCALS + 8]
	add $inp, $inc, $inp		! inp+=16
	nop

.Lcbc_enc_unaligned:
	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10
	ldd [$end + 24], %f12
	add $end, 32, $end

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$end + 0], %f6
	ldd [$end + 8], %f8

	brnz,a $inner, .Lcbc_enc_unaligned
	sub $inner, 16*2, $inner

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10		! round[last-1]
	ldd [$end + 24], %f12

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2

	ldd [%sp + LOCALS + 0], $in0
	ldd [%sp + LOCALS + 8], $in1

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fxor $r0hi, $in0, $in0		! inp^=round[0]
	fxor $r0lo, $in1, $in1

	fmovd %f0, %f4
	faesenclx %f2, $rlhi, %f0
	faesenclx %f4, $rllo, %f2

	fshiftorx $outhead, %f0, $fshift, %f6
	fshiftorx %f0, %f2, $fshift, %f8
	std %f6, [$out + 0]
	std %f8, [$out + 8]
	add $out, 16, $out

	brnz,a $len, .Loop_cbc_enc_unaligned_out
	sub $len, 1, $len

.Lcbc_enc_unaligned_out_done:
	fshiftorx %f2, %f2, $fshift, %f8
	stda %f8, [$out + $mask]0xc0	! partial store

	st %f0, [$ivp + 0]		! output ivec
	st %f1, [$ivp + 4]
	st %f2, [$ivp + 8]
	st %f3, [$ivp + 12]

	ret
	restore

.align 32
.Lcbc_decrypt:
	fshiftorx $in0, $in1, $fshift, $in0
	fshiftorx $in1, $intail, $fshift, $in1
	fmovd %f0, $iv0
	fmovd %f2, $iv1

.Loop_cbc_dec:
	fxor $in0, $r0hi, %f0		! inp^round[0]
	fxor $in1, $r0lo, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8
	add $key, 32, $end
	sub $rounds, 16*6, $inner

.Lcbc_dec:
	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$end + 16], %f10
	ldd [$end + 24], %f12
	add $end, 32, $end

	fmovd %f0, %f4
	faesdecx %f2, %f6, %f0
	faesdecx %f4, %f8, %f2
	ldd [$end + 0], %f6
	ldd [$end + 8], %f8

	brnz,a $inner, .Lcbc_dec
	sub $inner, 16*2, $inner

	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$end + 16], %f10		! round[last-1]
	ldd [$end + 24], %f12

	fmovd %f0, %f4
	faesdecx %f2, %f6, %f0
	faesdecx %f4, %f8, %f2
	fxor $iv0, $rlhi, %f6		! ivec^round[last]
	fxor $iv1, $rllo, %f8
	fmovd $in0, $iv0
	fmovd $in1, $iv1

	movrz $len, 0, $inc
	fmovd $intail, $in0
	ldd [$inp - 8], $in1		! load next input block
	ldda [$inp]0x82, $intail	! non-faulting load
	add $inp, $inc, $inp		! inp+=16

	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fshiftorx $in0, $in1, $fshift, $in0
	fshiftorx $in1, $intail, $fshift, $in1

	fmovd %f0, %f4
	faesdeclx %f2, %f6, %f0
	faesdeclx %f4, %f8, %f2

	brnz,pn $oalign, .Lcbc_dec_unaligned_out
	nop

	std %f0, [$out + 0]
	std %f2, [$out + 8]
	add $out, 16, $out

	brnz,a $len, .Loop_cbc_dec
	sub $len, 1, $len

	st $iv0, [$ivp + 0]		! output ivec
	st $iv0#lo, [$ivp + 4]
	st $iv1, [$ivp + 8]
	st $iv1#lo, [$ivp + 12]

	ret
	restore

.align 32
.Lcbc_dec_unaligned_out:
	ldd [%o7 + $mask], $fshift	! shift right params
	mov 0xff, $mask
	srl $mask, $oalign, $mask
	sub %g0, $ileft, $iright

	fshiftorx %f0, %f0, $fshift, %f6
	fshiftorx %f0, %f2, $fshift, %f8

	stda %f6, [$out + $mask]0xc0	! partial store
	orn %g0, $mask, $mask
	std %f8, [$out + 8]
	add $out, 16, $out
	brz $len, .Lcbc_dec_unaligned_out_done
	sub $len, 1, $len
	b .Loop_cbc_dec_unaligned_out
	nop

.align 32
.Loop_cbc_dec_unaligned_out:
	fmovd %f2, $outhead
	fxor $in0, $r0hi, %f0		! inp^round[0]
	fxor $in1, $r0lo, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8

	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$key + 48], %f10		! round[3]
	ldd [$key + 56], %f12

	ldx [$inp - 16], %o0
	ldx [$inp - 8], %o1
	brz $ileft, .Lcbc_dec_aligned_inp
	movrz $len, 0, $inc

	ldx [$inp], %o2
	sllx %o0, $ileft, %o0
	srlx %o1, $iright, %g1
	sllx %o1, $ileft, %o1
	or %g1, %o0, %o0
	srlx %o2, $iright, %o2
	or %o2, %o1, %o1

.Lcbc_dec_aligned_inp:
	fmovd %f0, %f4
	faesdecx %f2, %f6, %f0
	faesdecx %f4, %f8, %f2
	ldd [$key + 64], %f6		! round[4]
	ldd [$key + 72], %f8
	add $key, 64, $end
	sub $rounds, 16*8, $inner

	stx %o0, [%sp + LOCALS + 0]
	stx %o1, [%sp + LOCALS + 8]
	add $inp, $inc, $inp		! inp+=16
	nop

.Lcbc_dec_unaligned:
	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$end + 16], %f10
	ldd [$end + 24], %f12
	add $end, 32, $end

	fmovd %f0, %f4
	faesdecx %f2, %f6, %f0
	faesdecx %f4, %f8, %f2
	ldd [$end + 0], %f6
	ldd [$end + 8], %f8

	brnz,a $inner, .Lcbc_dec_unaligned
	sub $inner, 16*2, $inner

	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$end + 16], %f10		! round[last-1]
	ldd [$end + 24], %f12

	fmovd %f0, %f4
	faesdecx %f2, %f6, %f0
	faesdecx %f4, %f8, %f2

	fxor $iv0, $rlhi, %f6		! ivec^round[last]
	fxor $iv1, $rllo, %f8
	fmovd $in0, $iv0
	fmovd $in1, $iv1
	ldd [%sp + LOCALS + 0], $in0
	ldd [%sp + LOCALS + 8], $in1

	fmovd %f0, %f4
	faesdecx %f2, %f10, %f0
	faesdecx %f4, %f12, %f2
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fmovd %f0, %f4
	faesdeclx %f2, %f6, %f0
	faesdeclx %f4, %f8, %f2

	fshiftorx $outhead, %f0, $fshift, %f6
	fshiftorx %f0, %f2, $fshift, %f8
	std %f6, [$out + 0]
	std %f8, [$out + 8]
	add $out, 16, $out

	brnz,a $len, .Loop_cbc_dec_unaligned_out
	sub $len, 1, $len

.Lcbc_dec_unaligned_out_done:
	fshiftorx %f2, %f2, $fshift, %f8
	stda %f8, [$out + $mask]0xc0	! partial store

	st $iv0, [$ivp + 0]		! output ivec
	st $iv0#lo, [$ivp + 4]
	st $iv1, [$ivp + 8]
	st $iv1#lo, [$ivp + 12]

	ret
	restore
.type aes_fx_cbc_encrypt,#function
.size aes_fx_cbc_encrypt,.-aes_fx_cbc_encrypt
___
}
{
my ($inp,$out,$len,$key,$ivp) = map("%i$_",(0..5));
my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
my ($ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
    = map("%f$_",grep { !($_ & 1) } (16 .. 62));
my ($ileft,$iright) = ($ialign, $oalign);
my $one = "%f14";

$code.=<<___;
.globl aes_fx_ctr32_encrypt_blocks
.align 32
aes_fx_ctr32_encrypt_blocks:
	save %sp, -STACK_FRAME-16, %sp
	srln $len, 0, $len
	and $inp, 7, $ialign
	andn $inp, 7, $inp
	brz,pn $len, .Lctr32_no_data
	sll $ialign, 3, $ileft

.Lpic:	call .+8
	add %o7, .Linp_align - .Lpic, %o7

	ld [$key + 240], $rounds
	and $out, 7, $oalign
	ld [$ivp + 0], $ctr0		! load counter
	andn $out, 7, $out
	ld [$ivp + 4], $ctr0#lo
	sll $oalign, 3, $mask
	ld [$ivp + 8], $ctr1
	ld [$ivp + 12], $ctr1#lo
	ldd [%o7 + 128], $one

	sll $rounds, 4, $rounds
	add $rounds, $key, $end
	ldd [$key + 0], $r0hi		! round[0]
	ldd [$key + 8], $r0lo

	add $inp, 16, $inp
	sub $len, 1, $len
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	mov 16, $inc
	movrz $len, 0, $inc
	ldd [$end + 0], $rlhi		! round[last]
	ldd [$end + 8], $rllo

	ldd [%o7 + $ileft], $fshift	! shift left params
	add %o7, 64, %o7
	ldd [$inp - 16], $in0		! load input
	ldd [$inp - 8], $in1
	ldda [$inp]0x82, $intail	! non-faulting load
	add $inp, $inc, $inp		! inp+=16

	fshiftorx $in0, $in1, $fshift, $in0
	fshiftorx $in1, $intail, $fshift, $in1

.Loop_ctr32:
	fxor $ctr0, $r0hi, %f0		! counter^round[0]
	fxor $ctr1, $r0lo, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8
	add $key, 32, $end
	sub $rounds, 16*6, $inner

.Lctr32_enc:
	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10
	ldd [$end + 24], %f12
	add $end, 32, $end

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$end + 0], %f6
	ldd [$end + 8], %f8

	brnz,a $inner, .Lctr32_enc
	sub $inner, 16*2, $inner

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10		! round[last-1]
	ldd [$end + 24], %f12

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	fxor $in0, $rlhi, %f6		! inp^round[last]
	fxor $in1, $rllo, %f8

	movrz $len, 0, $inc
	fmovd $intail, $in0
	ldd [$inp - 8], $in1		! load next input block
	ldda [$inp]0x82, $intail	! non-faulting load
	add $inp, $inc, $inp		! inp+=16

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fshiftorx $in0, $in1, $fshift, $in0
	fshiftorx $in1, $intail, $fshift, $in1
	fpadd32 $ctr1, $one, $ctr1	! increment counter

	fmovd %f0, %f4
	faesenclx %f2, %f6, %f0
	faesenclx %f4, %f8, %f2

	brnz,pn $oalign, .Lctr32_unaligned_out
	nop

	std %f0, [$out + 0]
	std %f2, [$out + 8]
	add $out, 16, $out

	brnz,a $len, .Loop_ctr32
	sub $len, 1, $len

.Lctr32_no_data:
	ret
	restore

.align 32
.Lctr32_unaligned_out:
	ldd [%o7 + $mask], $fshift	! shift right params
	mov 0xff, $mask
	srl $mask, $oalign, $mask
	sub %g0, $ileft, $iright

	fshiftorx %f0, %f0, $fshift, %f6
	fshiftorx %f0, %f2, $fshift, %f8

	stda %f6, [$out + $mask]0xc0	! partial store
	orn %g0, $mask, $mask
	std %f8, [$out + 8]
	add $out, 16, $out
	brz $len, .Lctr32_unaligned_out_done
	sub $len, 1, $len
	b .Loop_ctr32_unaligned_out
	nop

.align 32
.Loop_ctr32_unaligned_out:
	fmovd %f2, $outhead
	fxor $ctr0, $r0hi, %f0		! counter^round[0]
	fxor $ctr1, $r0lo, %f2
	ldd [$key + 32], %f6		! round[2]
	ldd [$key + 40], %f8

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 48], %f10		! round[3]
	ldd [$key + 56], %f12

	ldx [$inp - 16], %o0
	ldx [$inp - 8], %o1
	brz $ileft, .Lctr32_aligned_inp
	movrz $len, 0, $inc

	ldx [$inp], %o2
	sllx %o0, $ileft, %o0
	srlx %o1, $iright, %g1
	sllx %o1, $ileft, %o1
	or %g1, %o0, %o0
	srlx %o2, $iright, %o2
	or %o2, %o1, %o1

.Lctr32_aligned_inp:
	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$key + 64], %f6		! round[4]
	ldd [$key + 72], %f8
	add $key, 64, $end
	sub $rounds, 16*8, $inner

	stx %o0, [%sp + LOCALS + 0]
	stx %o1, [%sp + LOCALS + 8]
	add $inp, $inc, $inp		! inp+=16
	nop

.Lctr32_enc_unaligned:
	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10
	ldd [$end + 24], %f12
	add $end, 32, $end

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	ldd [$end + 0], %f6
	ldd [$end + 8], %f8

	brnz,a $inner, .Lctr32_enc_unaligned
	sub $inner, 16*2, $inner

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$end + 16], %f10		! round[last-1]
	ldd [$end + 24], %f12
	fpadd32 $ctr1, $one, $ctr1	! increment counter

	fmovd %f0, %f4
	faesencx %f2, %f6, %f0
	faesencx %f4, %f8, %f2
	fxor $in0, $rlhi, %f6		! inp^round[last]
	fxor $in1, $rllo, %f8
	ldd [%sp + LOCALS + 0], $in0
	ldd [%sp + LOCALS + 8], $in1

	fmovd %f0, %f4
	faesencx %f2, %f10, %f0
	faesencx %f4, %f12, %f2
	ldd [$key + 16], %f10		! round[1]
	ldd [$key + 24], %f12

	fmovd %f0, %f4
	faesenclx %f2, %f6, %f0
	faesenclx %f4, %f8, %f2

	fshiftorx $outhead, %f0, $fshift, %f6
	fshiftorx %f0, %f2, $fshift, %f8
	std %f6, [$out + 0]
	std %f8, [$out + 8]
	add $out, 16, $out

	brnz,a $len, .Loop_ctr32_unaligned_out
	sub $len, 1, $len

.Lctr32_unaligned_out_done:
	fshiftorx %f2, %f2, $fshift, %f8
	stda %f8, [$out + $mask]0xc0	! partial store

	ret
	restore
.type aes_fx_ctr32_encrypt_blocks,#function
.size aes_fx_ctr32_encrypt_blocks,.-aes_fx_ctr32_encrypt_blocks

.align 32
.Linp_align:	! fshiftorx parameters for left shift toward %rs1
	.byte 0, 0, 64, 0, 0, 64, 0, -64
	.byte 0, 0, 56, 8, 0, 56, 8, -56
	.byte 0, 0, 48, 16, 0, 48, 16, -48
	.byte 0, 0, 40, 24, 0, 40, 24, -40
	.byte 0, 0, 32, 32, 0, 32, 32, -32
	.byte 0, 0, 24, 40, 0, 24, 40, -24
	.byte 0, 0, 16, 48, 0, 16, 48, -16
	.byte 0, 0, 8, 56, 0, 8, 56, -8
.Lout_align:	! fshiftorx parameters for right shift toward %rs2
	.byte 0, 0, 0, 64, 0, 0, 64, 0
	.byte 0, 0, 8, 56, 0, 8, 56, -8
	.byte 0, 0, 16, 48, 0, 16, 48, -16
	.byte 0, 0, 24, 40, 0, 24, 40, -24
	.byte 0, 0, 32, 32, 0, 32, 32, -32
	.byte 0, 0, 40, 24, 0, 40, 24, -40
	.byte 0, 0, 48, 16, 0, 48, 16, -48
	.byte 0, 0, 56, 8, 0, 56, 8, -56
.Lone:
	.word 0, 1
.asciz "AES for Fujitsu SPARC64 X, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
}
# The purpose of these subroutines is to explicitly encode VIS
# instructions, so that the module can be compiled without specifying
# VIS extensions on the compiler command line, e.g. -xarch=v9 vs.
# -xarch=v9a. The idea is to reserve the option of producing a
# "universal" binary, letting the programmer detect at run-time
# whether the current CPU is VIS-capable.
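#
# For instance (a worked example, not part of the build): with the
# encoding formula used in unvis() below, "fxor %f0,%f2,%f4" (VIS1
# opf 0x06c) assembles to
#
#	.word	0x89b00d82	! 0x81b00000|4<<25|0<<14|0x06c<<5|2
#
# and, with the unfx() formula, "faesencx %f2,%f10,%f0" (opf 0x90)
# assembles to
#
#	.word	0x81b0920a	! 2<<30|0<<25|0x36<<19|2<<14|0x90<<5|10
#
# which is what the post-processing loop at the bottom of this file
# emits in place of the mnemonics.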
sub unvis {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my ($ref,$opf);
my %visopf = ( "faligndata" => 0x048,
               "bshuffle"   => 0x04c,
               "fpadd32"    => 0x052,
               "fxor"       => 0x06c,
               "fsrc2"      => 0x078 );

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%f([0-9]{1,2})/);
	    $_=$1;
	    if ($1>=32) {
		return $ref if ($1&1);
		# re-encode for upper double register addressing
		$_=($1|$1>>5)&31;
	    }
	}

	return sprintf ".word\t0x%08x !%s",
		       0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
		       $ref;
    } else {
	return $ref;
    }
}

sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = ( "alignaddr"  => 0x018,
               "bmask"      => 0x019,
               "alignaddrl" => 0x01a );

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return sprintf ".word\t0x%08x !%s",
		       0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
		       $ref;
    } else {
	return $ref;
    }
}

sub unfx {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my ($ref,$opf);
my %aesopf = ( "faesencx"  => 0x90,
               "faesdecx"  => 0x91,
               "faesenclx" => 0x92,
               "faesdeclx" => 0x93,
               "faeskeyx"  => 0x94 );

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if (defined($opf=$aesopf{$mnemonic})) {
	$rs2 = ($rs2 =~ /%f([0-6]*[02468])/) ? (($1|$1>>5)&31) : $rs2;
	$rs2 = oct($rs2) if ($rs2 =~ /^0/);

	foreach ($rs1,$rd) {
	    return $ref if (!/%f([0-9]{1,2})/);
	    $_=$1;
	    if ($1>=32) {
		return $ref if ($1&1);
		# re-encode for upper double register addressing
		$_=($1|$1>>5)&31;
	    }
	}

	return sprintf ".word\t0x%08x !%s",
		       2<<30|$rd<<25|0x36<<19|$rs1<<14|$opf<<5|$rs2,
		       $ref;
    } else {
	return $ref;
    }
}

sub unfx3src {
my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
my ($ref,$opf);
my %aesopf = ( "fshiftorx" => 0x0b );

    $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";

    if (defined($opf=$aesopf{$mnemonic})) {
	foreach ($rs1,$rs2,$rs3,$rd) {
	    return $ref if (!/%f([0-9]{1,2})/);
	    $_=$1;
	    if ($1>=32) {
		return $ref if ($1&1);
		# re-encode for upper double register addressing
		$_=($1|$1>>5)&31;
	    }
	}

	return sprintf ".word\t0x%08x !%s",
		       2<<30|$rd<<25|0x37<<19|$rs1<<14|$rs3<<9|$opf<<5|$rs2,
		       $ref;
    } else {
	return $ref;
    }
}

foreach (split("\n",$code)) {
    s/\`([^\`]*)\`/eval $1/ge;

    s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;

    s/\b(faes[^x]{3,4}x)\s+(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
		&unfx($1,$2,$3,$4)
     /ge or
    s/\b([f][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
		&unfx3src($1,$2,$3,$4,$5)
     /ge or
    s/\b([fb][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
		&unvis($1,$2,$3,$4)
     /ge or
    s/\b(alignaddr[l]*)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
		&unvis3($1,$2,$3,$4)
     /ge;
    print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";