Makes AArch64 assembly acceptable to clang
author     Shu-Chun Weng <scw@google.com>
           Fri, 19 Apr 2019 21:47:59 +0000 (14:47 -0700)
committer  Fangrui Song <i@maskray.me>
           Sat, 28 Aug 2021 00:26:01 +0000 (17:26 -0700)
According to the ARMv8 Architecture Reference Manual (section C7.2.188), the format of the
SIMD MOV (to general) instruction is

  MOV <Xd>, <Vn>.D[<index>]

gas appears to accept "<Vn>.2D[<index>]" as well, but clang's integrated assembler does
not.  Cf. https://community.arm.com/developer/ip-products/processors/f/cortex-a-forum/5214/aarch64-assembly-syntax-for-armclang
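
As a quick illustration (not part of the patch; the file name, symbol, and register
choices below are arbitrary), a minimal standalone snippet using the architectural
spelling, which both gas and clang's integrated assembler should accept:

  /* mov_demo.s -- hypothetical example, not part of this commit.
     Expected to assemble with both assemblers, e.g. (toolchain names
     are illustrative):
       aarch64-linux-gnu-as -o mov_demo.o mov_demo.s
       clang --target=aarch64-linux-gnu -c mov_demo.s  */
	.text
	.global	mov_demo
	.type	mov_demo, %function
  mov_demo:
	movi	v0.16b, #1	/* Fill a SIMD register with a known pattern.  */
	mov	x0, v0.d[0]	/* MOV (to general): MOV <Xd>, <Vn>.D[<index>].  */
	/* "mov x0, v0.2d[0]" assembles with gas but is rejected by clang.  */
	ret
	.size	mov_demo, .-mov_demo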

sysdeps/aarch64/memchr.S
sysdeps/aarch64/strchr.S
sysdeps/aarch64/strchrnul.S
sysdeps/aarch64/strrchr.S

diff --git a/sysdeps/aarch64/memchr.S b/sysdeps/aarch64/memchr.S
index e422aef090d10074b64f7148651270dbce04154d..a3b0fc64861127eec7fe38d3f8c281cde2b0641c 100644
--- a/sysdeps/aarch64/memchr.S
+++ b/sysdeps/aarch64/memchr.S
@@ -91,7 +91,7 @@ ENTRY (__memchr)
        and     vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
        addp    vend.16b, vhas_chr1.16b, vhas_chr2.16b          /* 256->128 */
        addp    vend.16b, vend.16b, vend.16b                    /* 128->64 */
-       mov     synd, vend.2d[0]
+       mov     synd, vend.d[0]
        /* Clear the soff*2 lower bits */
        lsl     tmp, soff, #1
        lsr     synd, synd, tmp
@@ -111,7 +111,7 @@ L(loop):
        /* Use a fast check for the termination condition */
        orr     vend.16b, vhas_chr1.16b, vhas_chr2.16b
        addp    vend.2d, vend.2d, vend.2d
-       mov     synd, vend.2d[0]
+       mov     synd, vend.d[0]
        /* We're not out of data, loop if we haven't found the character */
        cbz     synd, L(loop)
 
@@ -121,7 +121,7 @@ L(end):
        and     vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
        addp    vend.16b, vhas_chr1.16b, vhas_chr2.16b          /* 256->128 */
        addp    vend.16b, vend.16b, vend.16b                    /* 128->64 */
-       mov     synd, vend.2d[0]
+       mov     synd, vend.d[0]
        /* Only do the clear for the last possible block */
        b.hi    L(tail)
 
diff --git a/sysdeps/aarch64/strchr.S b/sysdeps/aarch64/strchr.S
index c27465220b417645cea989df8227e59ed31b0e96..57fd780dfa71815d9ff79e7e1fc37ea760dffeba 100644
--- a/sysdeps/aarch64/strchr.S
+++ b/sysdeps/aarch64/strchr.S
@@ -94,7 +94,7 @@ ENTRY (strchr)
        addp    vend1.16b, vend1.16b, vend2.16b         // 128->64
        lsr     tmp1, tmp3, tmp1
 
-       mov     tmp3, vend1.2d[0]
+       mov     tmp3, vend1.d[0]
        bic     tmp1, tmp3, tmp1        // Mask padding bits.
        cbnz    tmp1, L(tail)
 
@@ -109,7 +109,7 @@ L(loop):
        orr     vend2.16b, vhas_nul2.16b, vhas_chr2.16b
        orr     vend1.16b, vend1.16b, vend2.16b
        addp    vend1.2d, vend1.2d, vend1.2d
-       mov     tmp1, vend1.2d[0]
+       mov     tmp1, vend1.d[0]
        cbz     tmp1, L(loop)
 
        /* Termination condition found.  Now need to establish exactly why
@@ -123,7 +123,7 @@ L(loop):
        addp    vend1.16b, vend1.16b, vend2.16b         // 256->128
        addp    vend1.16b, vend1.16b, vend2.16b         // 128->64
 
-       mov     tmp1, vend1.2d[0]
+       mov     tmp1, vend1.d[0]
 L(tail):
        sub     src, src, #32
        rbit    tmp1, tmp1
diff --git a/sysdeps/aarch64/strchrnul.S b/sysdeps/aarch64/strchrnul.S
index e13ace5b7e95f666ad4e7ecf7b223ab6bdeff2c0..7f340d3adf47825ed76283f1e5983e4e0703a42f 100644
--- a/sysdeps/aarch64/strchrnul.S
+++ b/sysdeps/aarch64/strchrnul.S
@@ -91,7 +91,7 @@ ENTRY (__strchrnul)
        addp    vend1.16b, vend1.16b, vend1.16b         // 128->64
        lsr     tmp1, tmp3, tmp1
 
-       mov     tmp3, vend1.2d[0]
+       mov     tmp3, vend1.d[0]
        bic     tmp1, tmp3, tmp1        // Mask padding bits.
        cbnz    tmp1, L(tail)
 
@@ -106,7 +106,7 @@ L(loop):
        orr     vhas_chr2.16b, vhas_nul2.16b, vhas_chr2.16b
        orr     vend1.16b, vhas_chr1.16b, vhas_chr2.16b
        addp    vend1.2d, vend1.2d, vend1.2d
-       mov     tmp1, vend1.2d[0]
+       mov     tmp1, vend1.d[0]
        cbz     tmp1, L(loop)
 
        /* Termination condition found.  Now need to establish exactly why
@@ -116,7 +116,7 @@ L(loop):
        addp    vend1.16b, vhas_chr1.16b, vhas_chr2.16b         // 256->128
        addp    vend1.16b, vend1.16b, vend1.16b         // 128->64
 
-       mov     tmp1, vend1.2d[0]
+       mov     tmp1, vend1.d[0]
 L(tail):
        /* Count the trailing zeros, by bit reversing...  */
        rbit    tmp1, tmp1
diff --git a/sysdeps/aarch64/strrchr.S b/sysdeps/aarch64/strrchr.S
index aa334ede53301c563660999b97685db0cc6a6618..1b7e238f49e559ce917eb35aa256723e6434e004 100644
--- a/sysdeps/aarch64/strrchr.S
+++ b/sysdeps/aarch64/strrchr.S
@@ -101,10 +101,10 @@ ENTRY(strrchr)
        addp    vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b     // 256->128
        addp    vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b     // 128->64
        addp    vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b     // 128->64
-       mov     nul_match, vhas_nul1.2d[0]
+       mov     nul_match, vhas_nul1.d[0]
        lsl     tmp1, tmp1, #1
        mov     const_m1, #~0
-       mov     chr_match, vhas_chr1.2d[0]
+       mov     chr_match, vhas_chr1.d[0]
        lsr     tmp3, const_m1, tmp1
 
        bic     nul_match, nul_match, tmp3      // Mask padding bits.
@@ -127,15 +127,15 @@ L(aligned):
        addp    vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b     // 256->128
        addp    vend1.16b, vend1.16b, vend1.16b // 128->64
        addp    vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b     // 128->64
-       mov     nul_match, vend1.2d[0]
-       mov     chr_match, vhas_chr1.2d[0]
+       mov     nul_match, vend1.d[0]
+       mov     chr_match, vhas_chr1.d[0]
        cbz     nul_match, L(loop)
 
        and     vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
        and     vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
        addp    vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b
        addp    vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b
-       mov     nul_match, vhas_nul1.2d[0]
+       mov     nul_match, vhas_nul1.d[0]
 
 L(tail):
        /* Work out exactly where the string ends.  */