(void*)a_vexsh2, sz_vexsh2,
(void*)a_spill, sz_spill );
- vg_assert(VG_IS_32_ALIGNED(sz_vex));
- vg_assert(VG_IS_32_ALIGNED(sz_vexsh1));
- vg_assert(VG_IS_32_ALIGNED(sz_vexsh2));
- vg_assert(VG_IS_32_ALIGNED(sz_spill));
+ vg_assert(VG_IS_16_ALIGNED(sz_vex));
+ vg_assert(VG_IS_16_ALIGNED(sz_vexsh1));
+ vg_assert(VG_IS_16_ALIGNED(sz_vexsh2));
+ vg_assert(VG_IS_16_ALIGNED(sz_spill));
- vg_assert(VG_IS_32_ALIGNED(a_vex));
- vg_assert(VG_IS_32_ALIGNED(a_vexsh1));
- vg_assert(VG_IS_32_ALIGNED(a_vexsh2));
- vg_assert(VG_IS_32_ALIGNED(a_spill));
+ vg_assert(VG_IS_16_ALIGNED(a_vex));
+ vg_assert(VG_IS_16_ALIGNED(a_vexsh1));
+ vg_assert(VG_IS_16_ALIGNED(a_vexsh2));
+ vg_assert(VG_IS_16_ALIGNED(a_spill));
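For reference, the VG_IS_*_ALIGNED tests used above are plain bit-mask checks. A minimal sketch of the idiom (Valgrind's own definitions live in pub_core_basics.h and may differ in exact spelling):

   /* Sketch only: a value is 16-aligned iff its low 4 bits are zero,
      and 32-aligned iff its low 5 bits are zero. */
   #define VG_IS_16_ALIGNED(x)  (0 == (((Addr)(x)) & ((Addr)0xF)))
   #define VG_IS_32_ALIGNED(x)  (0 == (((Addr)(x)) & ((Addr)0x1F)))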
/* Check that the guest state and its two shadows have the same
   size, and that there are no holes in between.  The latter is
   important because tools address the shadow areas at fixed
   offsets from the primary guest state. */
vg_assert(
   (offsetof(VexGuestAMD64State,guest_YMM16)
    - offsetof(VexGuestAMD64State,guest_YMM0))
   == (17/*#regs*/-1) * 32/*bytes per reg*/
);
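The (17/*#regs*/-1) * 32 arithmetic pins down the span between the first and last YMM slots: since each slot is 32 bytes, a total span of 16 * 32 bytes leaves no room for padding, so the 17 slots form a dense array and generated code can reach any one of them by computed offset rather than a per-register lookup. A hypothetical illustration of that addressing (YMM_OFF is not a Valgrind macro):

   /* Hypothetical: with the dense layout asserted above, the state
      offset of YMM register `regno` is pure arithmetic. */
   #define YMM_OFF(regno) \
      (offsetof(VexGuestAMD64State, guest_YMM0) + (regno) * 32)
   /* The assertion above amounts to
      YMM_OFF(16) == offsetof(VexGuestAMD64State, guest_YMM16). */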
- vg_assert(VG_IS_32_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
+ vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
/* Note that for code generation reasons, we require that the
guest state area, its two shadows, and the spill area, are
- 32-aligned and have 32-aligned sizes, and there are no holes
+ 16-aligned and have 16-aligned sizes, and there are no holes
in between. This is checked by do_pre_run_checks() in
scheduler.c. */
/* Saved machine context. */
- VexGuestArchState vex __attribute__((aligned(32)));
+ VexGuestArchState vex __attribute__((aligned(16)));
/* Saved shadow context (2 copies). */
- VexGuestArchState vex_shadow1 __attribute__((aligned(32)));
- VexGuestArchState vex_shadow2 __attribute__((aligned(32)));
+ VexGuestArchState vex_shadow1 __attribute__((aligned(16)));
+ VexGuestArchState vex_shadow2 __attribute__((aligned(16)));
/* Spill area. */
- UChar vex_spill[LibVEX_N_SPILL_BYTES] __attribute__((aligned(32)));
+ UChar vex_spill[LibVEX_N_SPILL_BYTES] __attribute__((aligned(16)));
/* --- END vex-mandated guest state --- */
}
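Taken together, the aligned(16) attributes here and the checks in do_pre_run_checks() guarantee that vex, vex_shadow1, vex_shadow2 and vex_spill sit back-to-back, so each shadow area is a fixed multiple of sizeof(VexGuestArchState) past the primary state. A minimal sketch of that addressing under the asserted layout (shadow_n is illustrative, not a Valgrind function):

   /* Illustrative only: given the hole-free layout
         [ vex | vex_shadow1 | vex_shadow2 | vex_spill ]
      shadow n (n == 1 or 2) lives at a fixed offset from the
      primary guest state, which is what lets generated code find
      it without any indirection. */
   static VexGuestArchState* shadow_n ( VexGuestArchState* vex, int n )
   {
      return (VexGuestArchState*)
             ( (UChar*)vex + n * sizeof(VexGuestArchState) );
   }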