git.ipfire.org Git - thirdparty/valgrind.git/commitdiff
VG_(helperc_LOADV64le): add handwritten assembly helper for ARM32
author    Julian Seward <jseward@acm.org>
Mon, 20 Jul 2015 09:59:25 +0000 (09:59 +0000)
committer Julian Seward <jseward@acm.org>
Mon, 20 Jul 2015 09:59:25 +0000 (09:59 +0000)
VG_(helperc_LOADV32le): VG_(helperc_LOADV16le): VG_(helperc_LOADV8):
use ".L" style assembly labels so they don't appear as new symbols
during disassembly.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@15418
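
For context, GNU as on ELF targets treats labels beginning with ".L" as assembler-local: they are not emitted into the object file's symbol table, so objdump -d no longer splits the disassembly at LV32c4, LV16c0 and friends as if each were a separate entry point. A minimal sketch of the difference, in the same inline-assembly style as mc_main.c but with made-up names (illustrative only, not part of the Valgrind sources):

/* local_label_demo.c -- illustrative only; built for the same arm-linux target */
__asm__(
".text                                 \n"
".align 2                              \n"
".global demo_parity                   \n"
".type   demo_parity, %function        \n"
"demo_parity:                          \n"
"      tst    r0, #1                   \n"
"      bne    .Ldemo_odd               \n" // ".L" label: no symbol emitted
"      mov    r0, #0                   \n"
"      bx     lr                       \n"
".Ldemo_odd:                           \n" // a plain "demo_odd:" here would show up as its own symbol in the disassembly
"      mov    r0, #1                   \n"
"      bx     lr                       \n"
".size demo_parity, .-demo_parity      \n"
".previous                             \n"
);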

memcheck/mc_main.c

index 386c2319c24669e5aafe560772234bbd076ed291..6946936d191794e7d0bf534013e527c898a58dd6 100644 (file)
@@ -4426,14 +4426,60 @@ ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
 #endif
 }
 
+// Generic for all platforms
 VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
 {
    return mc_LOADV64(a, True);
 }
+
+// Non-generic assembly for arm32-linux
+#if ENABLE_ASSEMBLY_HELPERS && defined(PERF_FAST_LOADV) \
+    && defined(VGP_arm_linux)
+__asm__( /* Derived from the 32 bit assembly helper */
+".text                                  \n"
+".align 2                               \n"
+".global vgMemCheck_helperc_LOADV64le   \n"
+".type   vgMemCheck_helperc_LOADV64le, %function \n"
+"vgMemCheck_helperc_LOADV64le:          \n"
+"      tst    r0, #7                    \n"
+"      movw   r3, #:lower16:primary_map \n"
+"      bne    .LLV64LEc4                \n" // if misaligned
+"      lsr    r2, r0, #16               \n"
+"      movt   r3, #:upper16:primary_map \n"
+"      ldr    r2, [r3, r2, lsl #2]      \n"
+"      uxth   r1, r0                    \n" // r1 is 0-(16)-0 X-(13)-X 000
+"      movw   r3, #0xAAAA               \n"
+"      lsr    r1, r1, #3                \n"
+"      ldrh   r1, [r2, r1]              \n"
+"      cmp    r1, r3                    \n" // 0xAAAA == VA_BITS16_DEFINED
+"      bne    .LLV64LEc0                \n" // if !all_defined
+"      mov    r1, #0x0                  \n" // 0x0 == V_BITS32_DEFINED
+"      mov    r0, #0x0                  \n" // 0x0 == V_BITS32_DEFINED
+"      bx     lr                        \n"
+".LLV64LEc0:                            \n"
+"      movw   r3, #0x5555               \n"
+"      cmp    r1, r3                    \n" // 0x5555 == VA_BITS16_UNDEFINED
+"      bne    .LLV64LEc4                \n" // if !all_undefined
+"      mov    r1, #0xFFFFFFFF           \n" // 0xFFFFFFFF == V_BITS32_UNDEFINED
+"      mov    r0, #0xFFFFFFFF           \n" // 0xFFFFFFFF == V_BITS32_UNDEFINED
+"      bx     lr                        \n"
+".LLV64LEc4:                            \n"
+"      push   {r4, lr}                  \n"
+"      mov    r2, #0                    \n"
+"      mov    r1, #64                   \n"
+"      bl     mc_LOADVn_slow            \n"
+"      pop    {r4, pc}                  \n"
+".size vgMemCheck_helperc_LOADV64le, .-vgMemCheck_helperc_LOADV64le \n"
+".previous\n"
+);
+
+#else
+// Generic for all platforms except arm32-linux
 VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
 {
    return mc_LOADV64(a, False);
 }
+#endif
 
 
 static INLINE
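
Compared with the generic mc_LOADV64 fast path named in the hunk header, the new helper's control flow is: take the slow path if the address is not 8-aligned, look up the secondary map via primary_map[a >> 16], fetch the 16 VA bits (2 per data byte) describing the 8 bytes at the address, and return all-zeroes (defined) or all-ones (undefined) when those bits are uniform, otherwise fall back to mc_LOADVn_slow. A rough, self-contained C sketch of that logic follows; the typedefs and the primary_map declaration are illustrative stand-ins, not Memcheck's real definitions:

/* Illustrative sketch of the decision logic in vgMemCheck_helperc_LOADV64le; not the real code. */
typedef unsigned long      Addr;
typedef unsigned long long ULong;

extern const unsigned short *primary_map[];   /* stand-in for Memcheck's primary map of secondary maps */
extern ULong mc_LOADVn_slow(Addr a, unsigned nBits, int isBigEndian);

static ULong loadv64le_sketch(Addr a)
{
   if (a & 7)                                          /* tst r0, #7 ; bne .LLV64LEc4       */
      return mc_LOADVn_slow(a, 64, 0);                 /* misaligned: take the slow path    */

   const unsigned short *sm = primary_map[a >> 16];    /* ldr r2, [r3, r2, lsl #2]          */
   unsigned short vabits16  = sm[(a & 0xFFFFu) >> 3];  /* 16 VA bits covering these 8 bytes */

   if (vabits16 == 0xAAAA)                             /* VA_BITS16_DEFINED                 */
      return 0x0000000000000000ULL;                    /* V_BITS64_DEFINED                  */
   if (vabits16 == 0x5555)                             /* VA_BITS16_UNDEFINED               */
      return 0xFFFFFFFFFFFFFFFFULL;                    /* V_BITS64_UNDEFINED                */
   return mc_LOADVn_slow(a, 64, 0);                    /* partially defined: slow path      */
}

Because a 64-bit result is returned in r0:r1 under the 32-bit ARM procedure-call standard, the assembly writes the constant into both r0 and r1 before the bx lr.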
@@ -4561,22 +4607,22 @@ __asm__( /* Derived from NCode template */
 "vgMemCheck_helperc_LOADV32le:          \n"
 "      tst    r0, #3                    \n" // 1
 "      movw   r3, #:lower16:primary_map \n" // 1
-"      bne    LV32c4                    \n" // 2  if misaligned
+"      bne    .LLV32LEc4                \n" // 2  if misaligned
 "      lsr    r2, r0, #16               \n" // 3
 "      movt   r3, #:upper16:primary_map \n" // 3
 "      ldr    r2, [r3, r2, lsl #2]      \n" // 4
 "      uxth   r1, r0                    \n" // 4
 "      ldrb   r1, [r2, r1, lsr #2]      \n" // 5
 "      cmp    r1, #0xAA                 \n" // 6  0xAA == VA_BITS8_DEFINED
-"      bne    LV32c0                    \n" // 7  if !all_defined
+"      bne    .LLV32LEc0                \n" // 7  if !all_defined
 "      mov    r0, #0x0                  \n" // 8  0x0 == V_BITS32_DEFINED
 "      bx     lr                        \n" // 9
-"LV32c0:                                \n"
+".LLV32LEc0:                            \n"
 "      cmp    r1, #0x55                 \n" // 0x55 == VA_BITS8_UNDEFINED
-"      bne    LV32c4                    \n" // if !all_undefined
+"      bne    .LLV32LEc4                \n" // if !all_undefined
 "      mov    r0, #0xFFFFFFFF           \n" // 0xFFFFFFFF == V_BITS32_UNDEFINED
 "      bx     lr                        \n"
-"LV32c4:                                \n"
+".LLV32LEc4:                            \n"
 "      push   {r4, lr}                  \n"
 "      mov    r2, #0                    \n"
 "      mov    r1, #32                   \n"
@@ -4719,7 +4765,7 @@ __asm__( /* Derived from NCode template */
 ".type   vgMemCheck_helperc_LOADV16le, %function \n"
 "vgMemCheck_helperc_LOADV16le:          \n" //
 "      tst    r0, #1                    \n" // 
-"      bne    LV16c12                   \n" // if misaligned
+"      bne    .LLV16LEc12               \n" // if misaligned
 "      lsr    r2, r0, #16               \n" // r2 = pri-map-ix
 "      movw   r3, #:lower16:primary_map \n" //
 "      uxth   r1, r0                    \n" // r1 = sec-map-offB
@@ -4727,32 +4773,32 @@ __asm__( /* Derived from NCode template */
 "      ldr    r2, [r3, r2, lsl #2]      \n" // r2 = sec-map
 "      ldrb   r1, [r2, r1, lsr #2]      \n" // r1 = sec-map-VABITS8
 "      cmp    r1, #0xAA                 \n" // r1 == VA_BITS8_DEFINED?
-"      bne    LV16c0                    \n" // no, goto LV16c0
-"LV16h9:                                \n" //
+"      bne    .LLV16LEc0                \n" // no, goto .LLV16LEc0
+".LLV16LEh9:                            \n" //
 "      mov    r0, #0xFFFFFFFF           \n" //
 "      lsl    r0, r0, #16               \n" // V_BITS16_DEFINED | top16safe
 "      bx     lr                        \n" //
-"LV16c0:                                \n" //
+".LLV16LEc0:                            \n" //
 "      cmp    r1, #0x55                 \n" // VA_BITS8_UNDEFINED
-"      bne    LV16c4                    \n" //
-"LV16c2:                                \n" //
+"      bne    .LLV16LEc4                \n" //
+".LLV16LEc2:                            \n" //
 "      mov    r0, #0xFFFFFFFF           \n" // V_BITS16_UNDEFINED | top16safe
 "      bx     lr                        \n" //
-"LV16c4:                                \n" //
+".LLV16LEc4:                            \n" //
        // r1 holds sec-map-VABITS8.  r0 holds the address and is 2-aligned.
        // Extract the relevant 4 bits and inspect.
-"      and    r2, r0, #2        \n" // addr & 2
-"      add    r2, r2, r2        \n" // 2 * (addr & 2)
-"      lsr    r1, r1, r2        \n" // sec-map-VABITS8 >> (2 * (addr & 2))
-"      and    r1, r1, #15       \n" // (sec-map-VABITS8 >> (2 * (addr & 2))) & 15
+"      and    r2, r0, #2       \n" // addr & 2
+"      add    r2, r2, r2       \n" // 2 * (addr & 2)
+"      lsr    r1, r1, r2       \n" // sec-map-VABITS8 >> (2 * (addr & 2))
+"      and    r1, r1, #15      \n" // (sec-map-VABITS8 >> (2 * (addr & 2))) & 15
 
 "      cmp    r1, #0xA                  \n" // VA_BITS4_DEFINED
-"      beq    LV16h9                    \n" //
+"      beq    .LLV16LEh9                \n" //
 
 "      cmp    r1, #0x5                  \n" // VA_BITS4_UNDEFINED
-"      beq    LV16c2                    \n" //
+"      beq    .LLV16LEc2                \n" //
 
-"LV16c12:                               \n" //
+".LLV16LEc12:                           \n" //
 "      push   {r4, lr}                  \n" //
 "      mov    r2, #0                    \n" //
 "      mov    r1, #16                   \n" //
@@ -4871,29 +4917,29 @@ __asm__( /* Derived from NCode template */
 "      ldr    r2, [r3, r2, lsl #2]      \n" // r2 = sec-map
 "      ldrb   r1, [r2, r1, lsr #2]      \n" // r1 = sec-map-VABITS8
 "      cmp    r1, #0xAA                 \n" // r1 == VA_BITS8_DEFINED?
-"      bne    LV8c0                     \n" // no, goto LV8c0
-"LV8h9:                                 \n" //
+"      bne    .LLV8c0                   \n" // no, goto .LLV8c0
+".LLV8h9:                               \n" //
 "      mov    r0, #0xFFFFFF00           \n" // V_BITS8_DEFINED | top24safe
 "      bx     lr                        \n" //
-"LV8c0:                                 \n" //
+".LLV8c0:                               \n" //
 "      cmp    r1, #0x55                 \n" // VA_BITS8_UNDEFINED
-"      bne    LV8c4                     \n" //
-"LV8c2:                                 \n" //
+"      bne    .LLV8c4                   \n" //
+".LLV8c2:                               \n" //
 "      mov    r0, #0xFFFFFFFF           \n" // V_BITS8_UNDEFINED | top24safe
 "      bx     lr                        \n" //
-"LV8c4:                                 \n" //
+".LLV8c4:                               \n" //
        // r1 holds sec-map-VABITS8
        // r0 holds the address.  Extract the relevant 2 bits and inspect.
-"      and    r2, r0, #3        \n" // addr & 3
-"      add    r2, r2, r2        \n" // 2 * (addr & 3)
-"      lsr    r1, r1, r2        \n" // sec-map-VABITS8 >> (2 * (addr & 3))
-"      and    r1, r1, #3        \n" // (sec-map-VABITS8 >> (2 * (addr & 3))) & 3
+"      and    r2, r0, #3       \n" // addr & 3
+"      add    r2, r2, r2       \n" // 2 * (addr & 3)
+"      lsr    r1, r1, r2       \n" // sec-map-VABITS8 >> (2 * (addr & 3))
+"      and    r1, r1, #3       \n" // (sec-map-VABITS8 >> (2 * (addr & 3))) & 3
 
 "      cmp    r1, #2                    \n" // VA_BITS2_DEFINED
-"      beq    LV8h9                     \n" //
+"      beq    .LLV8h9                   \n" //
 
 "      cmp    r1, #1                    \n" // VA_BITS2_UNDEFINED
-"      beq    LV8c2                     \n" //
+"      beq    .LLV8c2                   \n" //
 
 "      push   {r4, lr}                  \n" //
 "      mov    r2, #0                    \n" //