git.ipfire.org Git - thirdparty/valgrind.git/commitdiff
Back off from 32 alignment of various guest state bits. We don't
authorJulian Seward <jseward@acm.org>
Thu, 2 Aug 2012 22:08:53 +0000 (22:08 +0000)
committerJulian Seward <jseward@acm.org>
Thu, 2 Aug 2012 22:08:53 +0000 (22:08 +0000)
actually need it, and gcc on MacOS simply fails (for whatever reason)
to honour requests for 32-byte alignment, so we fall back to the
previous setting (16-byte alignment).

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12817

coregrind/m_scheduler/scheduler.c
coregrind/m_threadstate.c
coregrind/pub_core_threadstate.h

index a1a3f9d818757c4befc6227434b6db1f32d78c23..a36f219030986f8925dd498cf3477e747b25beb8 100644 (file)
@@ -699,15 +699,15 @@ static void do_pre_run_checks ( ThreadState* tst )
                (void*)a_vexsh2, sz_vexsh2,
                (void*)a_spill, sz_spill );
 
-   vg_assert(VG_IS_32_ALIGNED(sz_vex));
-   vg_assert(VG_IS_32_ALIGNED(sz_vexsh1));
-   vg_assert(VG_IS_32_ALIGNED(sz_vexsh2));
-   vg_assert(VG_IS_32_ALIGNED(sz_spill));
+   vg_assert(VG_IS_16_ALIGNED(sz_vex));
+   vg_assert(VG_IS_16_ALIGNED(sz_vexsh1));
+   vg_assert(VG_IS_16_ALIGNED(sz_vexsh2));
+   vg_assert(VG_IS_16_ALIGNED(sz_spill));
 
-   vg_assert(VG_IS_32_ALIGNED(a_vex));
-   vg_assert(VG_IS_32_ALIGNED(a_vexsh1));
-   vg_assert(VG_IS_32_ALIGNED(a_vexsh2));
-   vg_assert(VG_IS_32_ALIGNED(a_spill));
+   vg_assert(VG_IS_16_ALIGNED(a_vex));
+   vg_assert(VG_IS_16_ALIGNED(a_vexsh1));
+   vg_assert(VG_IS_16_ALIGNED(a_vexsh2));
+   vg_assert(VG_IS_16_ALIGNED(a_spill));
 
    /* Check that the guest state and its two shadows have the same
       size, and that there are no holes in between.  The latter is
@@ -746,7 +746,7 @@ static void do_pre_run_checks ( ThreadState* tst )
        - offsetof(VexGuestAMD64State,guest_YMM0))
       == (17/*#regs*/-1) * 32/*bytes per reg*/
    );
-   vg_assert(VG_IS_32_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
+   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
    vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
    vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
    vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
index a78506b959fb46f61278bc7598df2cba27e5ecfd..6b2a12b7b4cc7e1bca47e20b0abc494888ff0cca 100644 (file)
@@ -44,7 +44,7 @@
 
 ThreadId VG_(running_tid) = VG_INVALID_THREADID;
 
-ThreadState VG_(threads)[VG_N_THREADS] __attribute__((aligned(32)));
+ThreadState VG_(threads)[VG_N_THREADS] __attribute__((aligned(16)));
 
 /*------------------------------------------------------------*/
 /*--- Operations.                                          ---*/
index 2e80f87bf07dacb14ac8c823d538f94f66b342af..403d3de69ad1480404f08e907c86c4c51bd2118c 100644 (file)
@@ -104,19 +104,19 @@ typedef
 
       /* Note that for code generation reasons, we require that the
          guest state area, its two shadows, and the spill area, are
-         32-aligned and have 32-aligned sizes, and there are no holes
+         16-aligned and have 16-aligned sizes, and there are no holes
          in between.  This is checked by do_pre_run_checks() in
          scheduler.c. */
 
       /* Saved machine context. */
-      VexGuestArchState vex __attribute__((aligned(32)));
+      VexGuestArchState vex __attribute__((aligned(16)));
 
       /* Saved shadow context (2 copies). */
-      VexGuestArchState vex_shadow1 __attribute__((aligned(32)));
-      VexGuestArchState vex_shadow2 __attribute__((aligned(32)));
+      VexGuestArchState vex_shadow1 __attribute__((aligned(16)));
+      VexGuestArchState vex_shadow2 __attribute__((aligned(16)));
 
       /* Spill area. */
-      UChar vex_spill[LibVEX_N_SPILL_BYTES] __attribute__((aligned(32)));
+      UChar vex_spill[LibVEX_N_SPILL_BYTES] __attribute__((aligned(16)));
 
       /* --- END vex-mandated guest state --- */
    }