git.ipfire.org Git - thirdparty/openssl.git/commitdiff
Revert "Move rodata to .rodata section for armv8"
author Alexey Moksyakov <yavtuk@yandex.ru>
Tue, 21 Jan 2025 09:24:27 +0000 (12:24 +0300)
committer Tomas Mraz <tomas@openssl.org>
Tue, 21 Jan 2025 17:21:19 +0000 (18:21 +0100)
This reverts commit 5b36728d974578f2c74e9f3d2ee6472187084882.

Issue #26458

Reviewed-by: Neil Horman <nhorman@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/26498)

crypto/sm4/asm/sm4-armv8.pl
crypto/sm4/asm/vpsm4-armv8.pl
crypto/sm4/asm/vpsm4_ex-armv8.pl

index 0bf27ddb04d51e2df1bc9be3e2200d1a09e6d99d..7358a6e6a2cffc544501000a7fef75c807731d43 100755 (executable)
@@ -118,10 +118,7 @@ ___
 
 {{{
 $code.=<<___;
-.rodata
 .align 6
-.type _${prefix}_consts,%object
-_${prefix}_consts:
 .Lck:
        .long 0x00070E15, 0x1C232A31, 0x383F464D, 0x545B6269
        .long 0x70777E85, 0x8C939AA1, 0xA8AFB6BD, 0xC4CBD2D9
@@ -133,9 +130,6 @@ _${prefix}_consts:
        .long 0x10171E25, 0x2C333A41, 0x484F565D, 0x646B7279
 .Lfk:
        .long 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
-.size _${prefix}_consts,.-_${prefix}_consts
-
-.previous
 ___
 }}}
 
@@ -152,11 +146,9 @@ $code.=<<___;
 ${prefix}_set_encrypt_key:
        AARCH64_VALID_CALL_TARGET
        ld1     {$key0.4s},[$key]
-       adrp    $tmp, _${prefix}_consts
-       add     $tmp,$tmp,#:lo12:.Lfk
+       adr     $tmp,.Lfk
        ld1     {$fkconst.4s},[$tmp]
-       adrp    $tmp, _${prefix}_consts
-       add     $tmp,$tmp,#:lo12:.Lck
+       adr     $tmp,.Lck
        ld1     {$const0.4s,$const1.4s,$const2.4s,$const3.4s},[$tmp],64
 ___
        &rev32($key0, $key0);
@@ -191,11 +183,9 @@ $code.=<<___;
 ${prefix}_set_decrypt_key:
        AARCH64_VALID_CALL_TARGET
        ld1     {$key0.4s},[$key]
-       adrp    $tmp, _${prefix}_consts
-       add     $tmp,$tmp,#:lo12:.Lfk
+       adr     $tmp,.Lfk
        ld1     {$fkconst.4s},[$tmp]
-       adrp    $tmp, _${prefix}_consts
-       add     $tmp,$tmp,#:lo12:.Lck
+       adr     $tmp, .Lck
        ld1     {$const0.4s,$const1.4s,$const2.4s,$const3.4s},[$tmp],64
 ___
        &rev32($key0, $key0);
index 115bbc76da4a145c46d8e1d00a65fba13c07cbc6..78357676a56dfad4e011d068958ce2d69ab1f37c 100755 (executable)
@@ -474,8 +474,7 @@ sub load_sbox () {
        my $data = shift;
 
 $code.=<<___;
-       adrp    $ptr, _${prefix}_consts
-       add     $ptr,$ptr,#:lo12:.Lsbox
+       adr     $ptr,.Lsbox
        ld1     {@sbox[0].16b,@sbox[1].16b,@sbox[2].16b,@sbox[3].16b},[$ptr],#64
        ld1     {@sbox[4].16b,@sbox[5].16b,@sbox[6].16b,@sbox[7].16b},[$ptr],#64
        ld1     {@sbox[8].16b,@sbox[9].16b,@sbox[10].16b,@sbox[11].16b},[$ptr],#64
@@ -525,8 +524,7 @@ sub compute_tweak_vec() {
        my $std = shift;
        &rbit(@vtmp[2],$src,$std);
 $code.=<<___;
-       adrp $xtmp2, _${prefix}_consts
-       ldr  @qtmp[0], [$xtmp2,#:lo12:.Lxts_magic]
+       ldr  @qtmp[0], .Lxts_magic
        shl  $des.16b, @vtmp[2].16b, #1
        ext  @vtmp[1].16b, @vtmp[2].16b, @vtmp[2].16b,#15
        ushr @vtmp[1].16b, @vtmp[1].16b, #7
@@ -541,10 +539,9 @@ $code=<<___;
 .arch  armv8-a
 .text
 
-.rodata
-.type  _${prefix}_consts,%object
+.type  _vpsm4_consts,%object
 .align 7
-_${prefix}_consts:
+_vpsm4_consts:
 .Lsbox:
        .byte 0xD6,0x90,0xE9,0xFE,0xCC,0xE1,0x3D,0xB7,0x16,0xB6,0x14,0xC2,0x28,0xFB,0x2C,0x05
        .byte 0x2B,0x67,0x9A,0x76,0x2A,0xBE,0x04,0xC3,0xAA,0x44,0x13,0x26,0x49,0x86,0x06,0x99
@@ -578,8 +575,7 @@ _${prefix}_consts:
 .Lxts_magic:
        .quad 0x0101010101010187,0x0101010101010101
 
-.size  _${prefix}_consts,.-_${prefix}_consts
-.previous
+.size  _vpsm4_consts,.-_vpsm4_consts
 ___
 
 {{{
@@ -596,16 +592,13 @@ ___
        &load_sbox();
        &rev32($vkey,$vkey);
 $code.=<<___;
-       adrp    $pointer, _${prefix}_consts
-       add     $pointer,$pointer,#:lo12:.Lshuffles
+       adr     $pointer,.Lshuffles
        ld1     {$vmap.2d},[$pointer]
-       adrp    $pointer, _${prefix}_consts
-       add     $pointer,$pointer,#:lo12:.Lfk
+       adr     $pointer,.Lfk
        ld1     {$vfk.2d},[$pointer]
        eor     $vkey.16b,$vkey.16b,$vfk.16b
        mov     $schedules,#32
-       adrp    $pointer, _${prefix}_consts
-       add     $pointer,$pointer,#:lo12:.Lck
+       adr     $pointer,.Lck
        movi    @vtmp[0].16b,#64
        cbnz    $enc,1f
        add     $keys,$keys,124
index 2bbdb3e5b5b5f8419cb197155cb454857a09d74a..f4bcdbad1bc1f18b3acf88fd4ad503747c7a026e 100644 (file)
@@ -475,13 +475,12 @@ sub load_sbox () {
        my $data = shift;
 
 $code.=<<___;
-       adrp $xtmp1, _${prefix}_consts
-       ldr $MaskQ, [$xtmp1, #:lo12:.Lsbox_magic]
-       ldr $TAHMatQ, [$xtmp1, #:lo12:.Lsbox_magic+16]
-       ldr $TALMatQ, [$xtmp1, #:lo12:.Lsbox_magic+32]
-       ldr $ATAHMatQ, [$xtmp1, #:lo12:.Lsbox_magic+48]
-       ldr $ATALMatQ, [$xtmp1, #:lo12:.Lsbox_magic+64]
-       ldr $ANDMaskQ, [$xtmp1, #:lo12:.Lsbox_magic+80]
+       ldr $MaskQ, .Lsbox_magic
+       ldr $TAHMatQ, .Lsbox_magic+16
+       ldr $TALMatQ, .Lsbox_magic+32
+       ldr $ATAHMatQ, .Lsbox_magic+48
+       ldr $ATALMatQ, .Lsbox_magic+64
+       ldr $ANDMaskQ, .Lsbox_magic+80
 ___
 }
 
@@ -526,8 +525,7 @@ sub compute_tweak_vec() {
        my $std = shift;
        &rbit(@vtmp[2],$src,$std);
 $code.=<<___;
-       adrp $xtmp2, _${prefix}_consts
-       ldr  @qtmp[0], [$xtmp2, #:lo12:.Lxts_magic]
+       ldr  @qtmp[0], .Lxts_magic
        shl  $des.16b, @vtmp[2].16b, #1
        ext  @vtmp[1].16b, @vtmp[2].16b, @vtmp[2].16b,#15
        ushr @vtmp[1].16b, @vtmp[1].16b, #7
@@ -542,7 +540,6 @@ $code=<<___;
 .arch  armv8-a+crypto
 .text
 
-.rodata
 .type  _${prefix}_consts,%object
 .align 7
 _${prefix}_consts:
@@ -570,7 +567,6 @@ _${prefix}_consts:
        .quad 0x0f0f0f0f0f0f0f0f,0x0f0f0f0f0f0f0f0f
 
 .size  _${prefix}_consts,.-_${prefix}_consts
-.previous
 ___
 
 {{{
@@ -587,16 +583,13 @@ ___
        &load_sbox();
        &rev32($vkey,$vkey);
 $code.=<<___;
-       adrp    $pointer,_${prefix}_consts
-       add     $pointer,$pointer,#:lo12:.Lshuffles
+       adr     $pointer,.Lshuffles
        ld1     {$vmap.2d},[$pointer]
-       adrp    $pointer,_${prefix}_consts
-       add     $pointer,$pointer,#:lo12:.Lfk
+       adr     $pointer,.Lfk
        ld1     {$vfk.2d},[$pointer]
        eor     $vkey.16b,$vkey.16b,$vfk.16b
        mov     $schedules,#32
-       adrp    $pointer,_${prefix}_consts
-       add     $pointer,$pointer,#:lo12:.Lck
+       adr     $pointer,.Lck
        movi    @vtmp[0].16b,#64
        cbnz    $enc,1f
        add     $keys,$keys,124