powerpc: Rearrange cfi_offset calls
author		Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com>
		Tue, 23 Feb 2016 05:40:34 +0000 (11:10 +0530)
committer	Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>
		Fri, 11 Mar 2016 14:31:58 +0000 (11:31 -0300)
This patch rearranges cfi_offset() calls after the last store
so as to avoid extra DW_CFA_advance opcodes in unwind information.
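
Why this shrinks the unwind tables: each cfi_offset() macro expands to a
.cfi_offset assembler directive, and DWARF call-frame information is keyed by
instruction address, so when the directives are interleaved with the stores the
assembler must emit a DW_CFA_advance_loc opcode before each one to move the
unwinder's location to that store.  Grouping the directives after the last
store lets a single advance cover the whole group, which is acceptable here
because the few intervening instructions are simple stores and arithmetic that
are not expected to unwind.  A minimal sketch of the pattern (registers and
offsets mirror the strstr.S hunk below; not a literal excerpt from the patch):

	# Before: an advance opcode precedes each directive.
	std	r31, -8(r1)		/* Save r31 at CFA-8.  */
	cfi_offset(r31, -8)
	std	r30, -16(r1)		/* Save r30 at CFA-16.  */
	cfi_offset(r30, -16)

	# After: both directives land at one address and share one advance.
	std	r31, -8(r1)		/* Save r31 at CFA-8.  */
	std	r30, -16(r1)		/* Save r30 at CFA-16.  */
	cfi_offset(r31, -8)
	cfi_offset(r30, -16)

The saving can be checked by assembling both variants and comparing
"readelf --debug-dump=frames" output: the grouped version encodes the same
register-save rules with fewer DW_CFA_advance_loc entries.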

ChangeLog
sysdeps/powerpc/powerpc32/power4/memcmp.S
sysdeps/powerpc/powerpc32/power6/memcpy.S
sysdeps/powerpc/powerpc32/power7/memcmp.S
sysdeps/powerpc/powerpc64/power4/memcmp.S
sysdeps/powerpc/powerpc64/power7/memcmp.S
sysdeps/powerpc/powerpc64/power7/strstr.S

index 1064bc8c8ed26ffe7db5e768c39e6a56a0e46af5..440b0212d2bb470fe4b87283ba58839367b96c19 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2016-03-11  Rajalakshmi Srinivasaraghavan  <raji@linux.vnet.ibm.com>
+
+       * sysdeps/powerpc/powerpc32/power4/memcmp.S (memcmp): Rearrange
+       cfi_offset calls.
+       * sysdeps/powerpc/powerpc32/power6/memcpy.S (memcpy): Likewise.
+       * sysdeps/powerpc/powerpc32/power7/memcmp.S (memcmp): Likewise.
+       * sysdeps/powerpc/powerpc64/power4/memcmp.S (memcmp): Likewise.
+       * sysdeps/powerpc/powerpc64/power7/memcmp.S (memcmp): Likewise.
+       * sysdeps/powerpc/powerpc64/power7/strstr.S (strstr): Likewise.
+
 2016-03-10  Carlos O'Donell  <carlos@redhat.com>
 
        * timezone/checktab.awk: Removed.
index 602a7957ad1903eb4cc84abeaf93feec5d9499de..9cb116e2323219723d2bc1127ffb3888765ee51d 100644
--- a/sysdeps/powerpc/powerpc32/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -54,8 +54,8 @@ EALIGN (memcmp, 4, 0)
        stwu    1, -64(r1)
        cfi_adjust_cfa_offset(64)
        stw     rWORD8, 48(r1)
-       cfi_offset(rWORD8, (48-64))
        stw     rWORD7, 44(r1)
+       cfi_offset(rWORD8, (48-64))
        cfi_offset(rWORD7, (44-64))
        bne     L(unaligned)
 /* At this point we know both strings have the same alignment and the
@@ -747,18 +747,18 @@ L(unaligned):
    the actual start of rSTR2.  */
        clrrwi  rSTR2, rSTR2, 2
        stw     rWORD2_SHIFT, 28(r1)
-       cfi_offset(rWORD2_SHIFT, (28-64))
 /* Compute the left/right shift counts for the unaligned rSTR2,
    compensating for the logical (W aligned) start of rSTR1.  */
        clrlwi  rSHL, rWORD8_SHIFT, 30
        clrrwi  rSTR1, rSTR1, 2
        stw     rWORD4_SHIFT, 24(r1)
-       cfi_offset(rWORD4_SHIFT, (24-64))
        slwi    rSHL, rSHL, 3
        cmplw   cr5, rWORD8_SHIFT, rSTR2
        add     rN, rN, r12
        slwi    rWORD6, r12, 3
        stw     rWORD6_SHIFT, 20(r1)
+       cfi_offset(rWORD2_SHIFT, (28-64))
+       cfi_offset(rWORD4_SHIFT, (24-64))
        cfi_offset(rWORD6_SHIFT, (20-64))
        subfic  rSHR, rSHL, 32
        srwi    r0, rN, 4       /* Divide by 16 */
@@ -852,15 +852,15 @@ L(duPs4):
        .align  4
 L(Wunaligned):
        stw     rWORD8_SHIFT, 32(r1)
-       cfi_offset(rWORD8_SHIFT, (32-64))
        clrrwi  rSTR2, rSTR2, 2
        stw     rWORD2_SHIFT, 28(r1)
-       cfi_offset(rWORD2_SHIFT, (28-64))
        srwi    r0, rN, 4       /* Divide by 16 */
        stw     rWORD4_SHIFT, 24(r1)
-       cfi_offset(rWORD4_SHIFT, (24-64))
        andi.   r12, rN, 12     /* Get the W remainder */
        stw     rWORD6_SHIFT, 20(r1)
+       cfi_offset(rWORD8_SHIFT, (32-64))
+       cfi_offset(rWORD2_SHIFT, (28-64))
+       cfi_offset(rWORD4_SHIFT, (24-64))
        cfi_offset(rWORD6_SHIFT, (20-64))
        slwi    rSHL, rSHL, 3
 #ifdef __LITTLE_ENDIAN__
index 6dff0ed6df2fc80429b539bd1558ee7e6607dd68..ae796a26c1288dd9349a64c4a6fbb64335c0027a 100644
--- a/sysdeps/powerpc/powerpc32/power6/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/power6/memcpy.S
@@ -46,8 +46,8 @@ EALIGN (memcpy, 5, 0)
     ble-   cr1,L(word_unaligned_short) /* If move < 32 bytes.  */
     cmplw  cr6,10,11
     stw    31,24(1)
-    cfi_offset(31,(24-32))
     stw    30,20(1)
+    cfi_offset(31,(24-32))
     cfi_offset(30,(20-32))
     mr     30,3
     beq    .L0
index 9c06a89491e424475a176e6cd288db90243633f4..13e849210640622ebd454f0967c5e5ccf8d60f89 100644
--- a/sysdeps/powerpc/powerpc32/power7/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power7/memcmp.S
@@ -54,8 +54,8 @@ EALIGN (memcmp, 4, 0)
        stwu    1, -64(r1)
        cfi_adjust_cfa_offset(64)
        stw     rWORD8, 48(r1)
-       cfi_offset(rWORD8, (48-64))
        stw     rWORD7, 44(r1)
+       cfi_offset(rWORD8, (48-64))
        cfi_offset(rWORD7, (44-64))
        bne     L(unaligned)
 /* At this point we know both strings have the same alignment and the
@@ -747,18 +747,18 @@ L(unaligned):
    the actual start of rSTR2.  */
        clrrwi  rSTR2, rSTR2, 2
        stw     rWORD2_SHIFT, 28(r1)
-       cfi_offset(rWORD2_SHIFT, (28-64))
 /* Compute the left/right shift counts for the unaligned rSTR2,
    compensating for the logical (W aligned) start of rSTR1.  */
        clrlwi  rSHL, rWORD8_SHIFT, 30
        clrrwi  rSTR1, rSTR1, 2
        stw     rWORD4_SHIFT, 24(r1)
-       cfi_offset(rWORD4_SHIFT, (24-64))
        slwi    rSHL, rSHL, 3
        cmplw   cr5, rWORD8_SHIFT, rSTR2
        add     rN, rN, r12
        slwi    rWORD6, r12, 3
        stw     rWORD6_SHIFT, 20(r1)
+       cfi_offset(rWORD2_SHIFT, (28-64))
+       cfi_offset(rWORD4_SHIFT, (24-64))
        cfi_offset(rWORD6_SHIFT, (20-64))
        subfic  rSHR, rSHL, 32
        srwi    r0, rN, 4       /* Divide by 16 */
@@ -852,15 +852,15 @@ L(duPs4):
        .align  4
 L(Wunaligned):
        stw     rWORD8_SHIFT, 32(r1)
-       cfi_offset(rWORD8_SHIFT, (32-64))
        clrrwi  rSTR2, rSTR2, 2
        stw     rWORD2_SHIFT, 28(r1)
-       cfi_offset(rWORD2_SHIFT, (28-64))
        srwi    r0, rN, 4       /* Divide by 16 */
        stw     rWORD4_SHIFT, 24(r1)
-       cfi_offset(rWORD4_SHIFT, (24-64))
        andi.   r12, rN, 12     /* Get the W remainder */
        stw     rWORD6_SHIFT, 20(r1)
+       cfi_offset(rWORD8_SHIFT, (32-64))
+       cfi_offset(rWORD2_SHIFT, (28-64))
+       cfi_offset(rWORD4_SHIFT, (24-64))
        cfi_offset(rWORD6_SHIFT, (20-64))
        slwi    rSHL, rSHL, 3
 #ifdef __LITTLE_ENDIAN__
index c1a77c64b05fe70301f5239714d505f20c673734..65c659660d9948c5ba3e926ae4ea2345ef9c0de6 100644
--- a/sysdeps/powerpc/powerpc64/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc64/power4/memcmp.S
@@ -52,8 +52,8 @@ EALIGN (memcmp, 4, 0)
    byte loop.  */
        blt     cr1, L(bytealigned)
        std     rWORD8, -8(r1)
-       cfi_offset(rWORD8, -8)
        std     rWORD7, -16(r1)
+       cfi_offset(rWORD8, -8)
        cfi_offset(rWORD7, -16)
        bne     L(unaligned)
 /* At this point we know both strings have the same alignment and the
@@ -728,18 +728,18 @@ L(unaligned):
    the actual start of rSTR2.  */
        clrrdi  rSTR2, rSTR2, 3
        std     rWORD2_SHIFT, -48(r1)
-       cfi_offset(rWORD2_SHIFT, -48)
 /* Compute the left/right shift counts for the unaligned rSTR2,
    compensating for the logical (DW aligned) start of rSTR1.  */
        clrldi  rSHL, rWORD8_SHIFT, 61
        clrrdi  rSTR1, rSTR1, 3
        std     rWORD4_SHIFT, -56(r1)
-       cfi_offset(rWORD4_SHIFT, -56)
        sldi    rSHL, rSHL, 3
        cmpld   cr5, rWORD8_SHIFT, rSTR2
        add     rN, rN, r12
        sldi    rWORD6, r12, 3
        std     rWORD6_SHIFT, -64(r1)
+       cfi_offset(rWORD2_SHIFT, -48)
+       cfi_offset(rWORD4_SHIFT, -56)
        cfi_offset(rWORD6_SHIFT, -64)
        subfic  rSHR, rSHL, 64
        srdi    r0, rN, 5       /* Divide by 32 */
@@ -833,15 +833,15 @@ L(duPs4):
        .align  4
 L(DWunaligned):
        std     rWORD8_SHIFT, -40(r1)
-       cfi_offset(rWORD8_SHIFT, -40)
        clrrdi  rSTR2, rSTR2, 3
        std     rWORD2_SHIFT, -48(r1)
-       cfi_offset(rWORD2_SHIFT, -48)
        srdi    r0, rN, 5       /* Divide by 32 */
        std     rWORD4_SHIFT, -56(r1)
-       cfi_offset(rWORD4_SHIFT, -56)
        andi.   r12, rN, 24     /* Get the DW remainder */
        std     rWORD6_SHIFT, -64(r1)
+       cfi_offset(rWORD8_SHIFT, -40)
+       cfi_offset(rWORD2_SHIFT, -48)
+       cfi_offset(rWORD4_SHIFT, -56)
        cfi_offset(rWORD6_SHIFT, -64)
        sldi    rSHL, rSHL, 3
 #ifdef __LITTLE_ENDIAN__
index 4be29008c717c002f8c23ba17a5fe3ac01ffde98..881c7d5838cfb3d0a215d9351b4cea6f04a6fadf 100644
--- a/sysdeps/powerpc/powerpc64/power7/memcmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/memcmp.S
@@ -82,17 +82,17 @@ EALIGN (memcmp, 4, 0)
    byte loop.  */
        blt     cr1, L(bytealigned)
        std     rWORD8, rWORD8SAVE(r1)
-       cfi_offset(rWORD8, rWORD8SAVE)
        std     rWORD7, rWORD7SAVE(r1)
-       cfi_offset(rWORD7, rWORD7SAVE)
        std     rOFF8, rOFF8SAVE(r1)
-       cfi_offset(rWORD7, rOFF8SAVE)
        std     rOFF16, rOFF16SAVE(r1)
-       cfi_offset(rWORD7, rOFF16SAVE)
        std     rOFF24, rOFF24SAVE(r1)
-       cfi_offset(rWORD7, rOFF24SAVE)
        std     rOFF32, rOFF32SAVE(r1)
-       cfi_offset(rWORD7, rOFF32SAVE)
+       cfi_offset(rWORD8, rWORD8SAVE)
+       cfi_offset(rWORD7, rWORD7SAVE)
+       cfi_offset(rOFF8, rOFF8SAVE)
+       cfi_offset(rOFF16, rOFF16SAVE)
+       cfi_offset(rOFF24, rOFF24SAVE)
+       cfi_offset(rOFF32, rOFF32SAVE)
 
        li      rOFF8,8
        li      rOFF16,16
@@ -601,18 +601,18 @@ L(unaligned):
    the actual start of rSTR2.  */
        clrrdi  rSTR2, rSTR2, 3
        std     rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
-       cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
 /* Compute the left/right shift counts for the unaligned rSTR2,
    compensating for the logical (DW aligned) start of rSTR1.  */
        clrldi  rSHL, rWORD8_SHIFT, 61
        clrrdi  rSTR1, rSTR1, 3
        std     rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
-       cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
        sldi    rSHL, rSHL, 3
        cmpld   cr5, rWORD8_SHIFT, rSTR2
        add     rN, rN, r12
        sldi    rWORD6, r12, 3
        std     rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+       cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
+       cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
        cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
        subfic  rSHR, rSHL, 64
        srdi    r0, rN, 5       /* Divide by 32 */
@@ -689,15 +689,15 @@ L(duPs4):
        .align  4
 L(DWunaligned):
        std     rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
-       cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
        clrrdi  rSTR2, rSTR2, 3
        std     rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
-       cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
        srdi    r0, rN, 5       /* Divide by 32 */
        std     rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
-       cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
        andi.   r12, rN, 24     /* Get the DW remainder */
        std     rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
+       cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
+       cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
+       cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
        cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
        sldi    rSHL, rSHL, 3
        LD      rWORD6, 0, rSTR2
index fefac1c9e5fd6e0a537c89720c274a8f4def0e5a..0e18193e915b9dac843b0120518cac4bcf59ffb4 100644
--- a/sysdeps/powerpc/powerpc64/power7/strstr.S
+++ b/sysdeps/powerpc/powerpc64/power7/strstr.S
@@ -59,14 +59,14 @@ EALIGN (strstr, 4, 0)
        CALL_MCOUNT 2
        mflr    r0                      /* Load link register LR to r0.  */
        std     r31, -8(r1)             /* Save callers register r31.  */
-       cfi_offset(r31, -8)
        std     r30, -16(r1)            /* Save callers register r30.  */
-       cfi_offset(r30, -16)
        std     r29, -24(r1)            /* Save callers register r29.  */
-       cfi_offset(r29, -24)
        std     r28, -32(r1)            /* Save callers register r28.  */
-       cfi_offset(r28, -32)
        std     r0, 16(r1)              /* Store the link register.  */
+       cfi_offset(r31, -8)
+       cfi_offset(r30, -16)
+       cfi_offset(r29, -24)
+       cfi_offset(r28, -32)
        cfi_offset(lr, 16)
        stdu    r1, -FRAMESIZE(r1)      /* Create the stack frame.  */
        cfi_adjust_cfa_offset(FRAMESIZE)