vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
vg_assert(a_vex + 3 * sz_vex == a_spill);
+# if defined(VGA_amd64)
+  /* amd64 XMM regs (guest_XMM0 .. guest_XMM16) must form a dense
+     array, i.e. have no holes in between. */
+ vg_assert(
+ (offsetof(VexGuestAMD64State,guest_XMM16)
+ - offsetof(VexGuestAMD64State,guest_XMM0))
+ == (17/*#regs*/-1) * 16/*bytes per reg*/
+ );
+# endif
+
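The assertion relies on a standard C layout check: if N same-sized members really form a dense array, the offsetof distance between the first and the last member is exactly (N-1) times the member size, and any compiler-inserted padding breaks the equality. A minimal sketch of the idiom, using a hypothetical ToyGuestState rather than the real VexGuestAMD64State:

/* Sketch only: ToyGuestState and its members are illustrative,
   not part of VEX. */
#include <assert.h>
#include <stddef.h>

typedef struct {
   unsigned int  other_state;   /* unrelated leading member */
   unsigned char xmm0[16];
   unsigned char xmm1[16];
   unsigned char xmm2[16];
} ToyGuestState;

int main(void)
{
   /* Dense array of 3 regs: the first-to-last distance must be
      (3 - 1) * 16 bytes; padding between members would break it. */
   assert(offsetof(ToyGuestState, xmm2) - offsetof(ToyGuestState, xmm0)
          == (3/*#regs*/ - 1) * 16/*bytes per reg*/);
   return 0;
}

Asserting the layout once at startup is cheaper than defending every offset computation that assumes it, which is why the check sits next to the other guest-state sanity asserts.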
# if defined(VGA_ppc32) || defined(VGA_ppc64)
/* ppc guest_state vector regs must be 16 byte aligned for
loads/stores. This is important! */
vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VR1));
vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VR1));
vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VR1));
# endif
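The alignment asserts use the usual low-bits test: an address is 16-byte aligned iff its bottom four bits are zero. A self-contained sketch, with IS_16_ALIGNED written here as an assumed stand-in for Valgrind's VG_IS_16_ALIGNED:

#include <assert.h>
#include <stdint.h>

/* Stand-in for VG_IS_16_ALIGNED (an assumption, not the real macro):
   16-byte aligned <=> low four address bits are zero. */
#define IS_16_ALIGNED(p)  ((((uintptr_t)(p)) & 0xF) == 0)

typedef struct {
   /* GCC/Clang-specific alignment request, mirroring how the VEX
      guest state keeps its vector regs 16-byte aligned. */
   unsigned char guest_vr1[16] __attribute__((aligned(16)));
} ToyVexState;

int main(void)
{
   static ToyVexState st;
   assert(IS_16_ALIGNED(&st.guest_vr1));
   return 0;
}

Note that the real code checks the shadow states (vex_shadow1, vex_shadow2) as well as the guest state proper: all three share one layout, and a misaligned vector slot in any of them would break the 16-byte loads/stores the comment warns about.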
# if defined(VGA_arm)
/* arm guest_state VFP regs must be 8 byte aligned for
      loads/stores. */
if (o >= GOF(XMM13) && o+sz <= GOF(XMM13)+SZB(XMM13)) return GOF(XMM13);
if (o >= GOF(XMM14) && o+sz <= GOF(XMM14)+SZB(XMM14)) return GOF(XMM14);
if (o >= GOF(XMM15) && o+sz <= GOF(XMM15)+SZB(XMM15)) return GOF(XMM15);
+ if (o >= GOF(XMM16) && o+sz <= GOF(XMM16)+SZB(XMM16)) return GOF(XMM16);
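For context outside mc_machine.c: GOF and SZB are assumed here to be the usual offsetof/sizeof helpers over the guest state, so each ladder line reads "if the access [o, o+sz) lies wholly inside this register, return the register's base offset". The added XMM16 line just extends the ladder to the 17th register whose contiguity the new assert above guarantees. A toy version of the pattern:

#include <stddef.h>

typedef struct {
   unsigned char guest_XMM0[16];
   unsigned char guest_XMM1[16];
} ToyState;

/* Assumed shape of the helpers: offset-of and size-in-bytes of a
   guest register, selected by field name. */
#define GOF(reg)  ((long) offsetof(ToyState, guest_##reg))
#define SZB(reg)  ((long) sizeof(((ToyState*)0)->guest_##reg))

static long map_to_reg_base ( long o, long sz )
{
   if (o >= GOF(XMM0) && o+sz <= GOF(XMM0)+SZB(XMM0)) return GOF(XMM0);
   if (o >= GOF(XMM1) && o+sz <= GOF(XMM1)+SZB(XMM1)) return GOF(XMM1);
   return -1; /* not contained in a single XMM reg */
}

int main(void)
{
   /* A 4-byte access at offset 20 lies inside guest_XMM1 (bytes
      16..31), so it maps to GOF(XMM1) == 16. */
   return map_to_reg_base(20, 4) == 16 ? 0 : 1;
}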
/* MMX accesses to FP regs. Need to allow for 32-bit references
due to dirty helpers for frstor etc, which reference the entire