#! /usr/bin/env perl
# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# April 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+ a 32-byte shared
# table]. There is no experimental performance data available yet.
# The only approximation that can be made at this point is based on
# code size. The inner loop is 32 instructions long and on a
# single-issue core should execute in <40 cycles. Having verified that
# gcc 3.4 didn't unroll the corresponding loop, this assembler loop
# body was found to be ~3x smaller than the compiler-generated one...
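#
# For reference, a rough sketch of the data sizes behind the "4-bit"
# figure above (illustrative only, not part of the build):
#
#	Htable:   16 entries x 16 bytes (one 128-bit value per 4-bit
#	          nibble) = 256 bytes per key;
#	rem_4bit: 16 entries x 2 bytes  = 32 bytes, shared by all keys.
#
# Each input byte is processed as two nibbles: a table lookup, a 4-bit
# right shift of the 128-bit accumulator, and a reduction by XORing in
# rem_4bit[rem] for the four bits shifted out.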
#
# July 2010
#
# Rescheduling for the dual-issue pipeline resulted in an 8.5%
# improvement on the Cortex A8 core and ~25 cycles per processed byte
# (which was observed to be ~3 times faster than gcc-generated code:-)
#
# February 2011
#
# Profiler-assisted and platform-specific optimization resulted in a 7%
# improvement on the Cortex A8 core and ~23.5 cycles per byte.
#
# March 2011
#
# Added a NEON implementation featuring polynomial multiplication, i.e.
# no lookup tables involved. On Cortex A8 it was measured to process
# one byte in 15 cycles, or 55% faster than the integer-only code.
#
# April 2014
#
# Switched to the multiplication algorithm suggested in the paper
# referred to below and combined it with the reduction algorithm from
# the x86 module. The performance improvement over the previous version
# varies from 65% on Snapdragon S4 to 110% on Cortex A9. In absolute
# terms Cortex A8 processes one byte in 8.45 cycles, A9 in 10.2,
# A15 in 7.63, and Snapdragon S4 in 9.33.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf

# ====================================================================
# Note about the "528B" variant. In the ARM case it makes less sense to
# implement it, for the following reasons:
#
# - the performance improvement won't be anywhere near 50%, because the
#   128-bit shift operation is neatly fused with the 128-bit xor here,
#   and the "528B" variant would eliminate only 4-5 instructions out of
#   32 in the inner loop (meaning that the estimated improvement is
#   ~15%);
# - ARM-based systems are often embedded ones and the extra memory
#   consumption might be unappreciated (for so little improvement);
#
# Byte order [in]dependence. =========================================
#
# The caller is expected to maintain a specific *dword* order in
# Htable, namely with the *least* significant dword of the 128-bit
# value at the *lower* address. This differs completely from the C code
# and has everything to do with the ldm instruction and the order in
# which dwords are "consumed" by the algorithm. The *byte* order within
# these dwords is in turn whatever the *native* byte order is on the
# current platform. See gcm128.c for a working example...
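#
# A hedged illustration of that layout (names are for exposition only,
# assuming a little-endian ARM platform):
#
#	Htable[i] occupies 16 bytes laid out as { lo64, hi64 }, i.e. the
#	least significant dword at the lower address, each dword in
#	native byte order. An ldmia of such an entry then fills
#	r4-r7 = Zll,Zlh,Zhl,Zhh in exactly the order the inner loop
#	consumes them.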

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
        die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}

$Xi="r0";	# argument block
$Htbl="r1";
$inp="r2";
$len="r3";

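# For reference, the C prototypes these argument registers correspond to
# (as used by the table-driven GCM code in gcm128.c) are roughly:
#
#	void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
#	void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
#	                    const u8 *inp, size_t len);
#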
$Zll="r4";	# variables
$Zlh="r5";
$Zhl="r6";
$Zhh="r7";
$Tll="r8";
$Tlh="r9";
$Thl="r10";
$Thh="r11";
$nlo="r12";
################# r13 is stack pointer
$nhi="r14";
################# r15 is program counter

$rem_4bit=$inp;	# used in gcm_gmult_4bit
$cnt=$len;

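# Zsmash() emits the code that stores the 128-bit accumulator
# Z = $Zhh:$Zhl:$Zlh:$Zll back to Xi[0..15] in big-endian byte order:
# rev+str on little-endian ARMv7, plain str on big-endian, and strb
# byte stores otherwise. Any instructions passed in @_ are interleaved
# one per stored word to help hide latency.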
sub Zsmash() {
  my $i=12;
  my @args=@_;
  for ($Zll,$Zlh,$Zhl,$Zhh) {
    $code.=<<___;
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	$_,$_
	str	$_,[$Xi,#$i]
#elif defined(__ARMEB__)
	str	$_,[$Xi,#$i]
#else
	mov	$Tlh,$_,lsr#8
	strb	$_,[$Xi,#$i+3]
	mov	$Thl,$_,lsr#16
	strb	$Tlh,[$Xi,#$i+2]
	mov	$Thh,$_,lsr#24
	strb	$Thl,[$Xi,#$i+1]
	strb	$Thh,[$Xi,#$i]
#endif
___
    $code.="\t".shift(@args)."\n";
    $i-=4;
  }
}

$code=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

#ifdef __clang__
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif

.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit
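
@ A brief, best-effort note on the table above: rem_4bit[rem] holds the
@ 16-bit reduction term for the four accumulator bits ("rem") that fall
@ off during each 4-bit right shift of Z.  The loops below apply it
@ with a single "eor $Zhh,$Zhh,...,lsl#16", i.e. XORing it into the
@ top 16 bits of the 128-bit Z.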

.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	$rem_4bit,rem_4bit
#else
	sub	$rem_4bit,pc,#8+32	@ &rem_4bit
#endif
	b	.Lrem_4bit_got
	nop
	nop
.size	rem_4bit_get,.-rem_4bit_get

.global	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
.align	4
gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	$len,$inp,$len		@ $len to point at the end
	stmdb	sp!,{r3-r11,lr}		@ save $len/end too

	ldmia	r12,{r4-r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4-r11}		@ ... to stack

	ldrb	$nlo,[$inp,#15]
	ldrb	$nhi,[$Xi,#15]
.Louter:
	eor	$nlo,$nlo,$nhi
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	mov	$cnt,#14

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	add	$Thh,$Htbl,$nhi
	ldrb	$nlo,[$inp,#14]

	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	add	$nhi,$nhi,$nhi
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[sp,$nhi]		@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	ldrb	$nhi,[$Xi,#14]
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$nlo,$nlo,$nhi
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16

.Linner:
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	subs	$cnt,$cnt,#1
	add	$nlo,$nlo,$nlo
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[sp,$nlo]		@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	$nlo,[$inp,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$nhi,$nhi,$nhi
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	$Tll,[$Xi,$cnt]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tlh,[sp,$nhi]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	$nlo,$nlo,$Tll
	eor	$Zhh,$Thh,$Zhh,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tlh,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	$len,[sp,#32]		@ re-load $len/end
	add	$inp,$inp,#16
	mov	$nhi,$Zll
___
&Zsmash("cmp\t$inp,$len","\n".
	"#ifdef __thumb2__\n".
	"	it	ne\n".
	"#endif\n".
	"	ldrneb	$nlo,[$inp,#15]");
$code.=<<___;
	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

.global	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4-r11,lr}
	ldrb	$nlo,[$Xi,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	mov	$cnt,#14

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	ldrb	$nlo,[$Xi,#14]

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	add	$nhi,$nhi,$nhi
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$nlo,#0xf0
	eor	$Zhh,$Zhh,$Tll,lsl#16
	and	$nlo,$nlo,#0x0f

.Loop:
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	subs	$cnt,$cnt,#1
	add	$nlo,$nlo,$nlo
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[$rem_4bit,$nlo]	@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	$nlo,[$Xi,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$nhi,$nhi,$nhi
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
___
&Zsmash();
$code.=<<___;
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___
{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));

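# clmul64x64() emits NEON code for a 64x64->128-bit carry-less
# multiplication $r = $a * $b over GF(2)[x], built from 8x8-bit
# vmull.p8 polynomial multiplies as described in the Câmara-Gouvêa-
# López-Dahab paper referred to above: partial products of $a against
# byte-rotated copies of $b (and vice versa) are masked with
# $k48/$k32/$k16, shifted into place with vext.8 and XORed together.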
sub clmul64x64 {
my ($r,$a,$b)=@_;
$code.=<<___;
	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	$t3#hi, #0
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
	veor		$t0, $t0, $t1
	veor		$t2, $t2, $t3
	veor		$r, $r, $t0
	veor		$r, $r, $t2
___
}

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.global	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64		$IN#hi,[r1]!		@ load H
	vmov.i8		$t0,#0xe1
	vld1.64		$IN#lo,[r1]
	vshl.i64	$t0#hi,#57
	vshr.u64	$t0#lo,#63		@ t0=0xc2....01
	vdup.8		$t1,$IN#hi[7]
	vshr.u64	$Hlo,$IN#lo,#63
	vshr.s8		$t1,#7			@ broadcast carry bit
	vshl.i64	$IN,$IN,#1
	vand		$t0,$t0,$t1
	vorr		$IN#hi,$Hlo		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
	vstmia		r0,{$IN}

	ret					@ bx lr
.size	gcm_init_neon,.-gcm_init_neon

.global	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64		$IN#hi,[$Xi]!		@ load Xi
	vld1.64		$IN#lo,[$Xi]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	$IN,$IN
#endif
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing
	mov		$len,#16
	b		.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

.global	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64		$Xl#hi,[$Xi]!		@ load Xi
	vld1.64		$Xl#lo,[$Xi]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	$Xl,$Xl
#endif
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64		$IN#hi,[$inp]!		@ load inp
	vld1.64		$IN#lo,[$inp]!
#ifdef __ARMEL__
	vrev64.8	$IN,$IN
#endif
	veor		$IN,$Xl			@ inp^=Xi
.Lgmult_neon:
___
	&clmul64x64	($Xl,$Hlo,"$IN#lo");	# H.lo·Xi.lo
$code.=<<___;
	veor		$IN#lo,$IN#lo,$IN#hi	@ Karatsuba pre-processing
___
	&clmul64x64	($Xm,$Hhl,"$IN#lo");	# (H.lo+H.hi)·(Xi.lo+Xi.hi)
	&clmul64x64	($Xh,$Hhi,"$IN#hi");	# H.hi·Xi.hi
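# A brief note on the Karatsuba combination below: with Xi = Xh:Xl and
# H = Hh:Hl (64-bit halves), the three products above give
#	Xi*H = Hh*Xh*x^128 + ((Hh+Hl)*(Xh+Xl) + Hh*Xh + Hl*Xl)*x^64 + Hl*Xl
# (addition is XOR in GF(2)), so $Xm is folded with $Xl and $Xh using
# two XORs and the middle 64 bits are merged into $Xl#hi/$Xh#lo.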
$code.=<<___;
	veor		$Xm,$Xm,$Xl		@ Karatsuba post-processing
	veor		$Xm,$Xm,$Xh
	veor		$Xl#hi,$Xl#hi,$Xm#lo
	veor		$Xh#lo,$Xh#lo,$Xm#hi	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	$t1,$Xl,#57		@ 1st phase
	vshl.i64	$t2,$Xl,#62
	veor		$t2,$t2,$t1		@
	vshl.i64	$t1,$Xl,#63
	veor		$t2, $t2, $t1		@
	veor		$Xl#hi,$Xl#hi,$t2#lo	@
	veor		$Xh#lo,$Xh#lo,$t2#hi

	vshr.u64	$t2,$Xl,#1		@ 2nd phase
	veor		$Xh,$Xh,$Xl
	veor		$Xl,$Xl,$t2		@
	vshr.u64	$t2,$t2,#6
	vshr.u64	$Xl,$Xl,#1		@
	veor		$Xl,$Xl,$Xh		@
	veor		$Xl,$Xl,$t2		@

	subs		$len,#16
	bne		.Loop_neon

#ifdef __ARMEL__
	vrev64.8	$Xl,$Xl
#endif
	sub		$Xi,#16
	vst1.64		$Xl#hi,[$Xi]!		@ write out Xi
	vst1.64		$Xl#lo,[$Xi]

	ret					@ bx lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
___
}
$code.=<<___;
.asciz  "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
___

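# Post-process the generated code: expand the q<N>#lo/#hi shorthand into
# the corresponding d registers, and either turn "ret" into "bx lr" or
# encode a literal "bx lr" as .word 0xe12fff1e so that the result still
# assembles with -march=armv4.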
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT; # enforce flush