This assumes that guest_RIP_curr_instr is set correctly!
Whether the misaligned access raises SIGBUS (as on FreeBSD) or
SIGSEGV is decided at run time from the VexAbiInfo. */
static
-void gen_SIGNAL_if_not_XX_aligned ( IRTemp effective_addr, ULong mask )
+void gen_SIGNAL_if_not_XX_aligned ( const VexAbiInfo* vbi,
+ IRTemp effective_addr, ULong mask )
{
stmt(
IRStmt_Exit(
binop(Iop_CmpNE64,
binop(Iop_And64,mkexpr(effective_addr),mkU64(mask)),
mkU64(0)),
-#if defined(VGO_freebsd)
- Ijk_SigBUS,
-#else
- Ijk_SigSEGV,
-#endif
+ vbi->guest_amd64_sigbus_on_misalign ? Ijk_SigBUS : Ijk_SigSEGV,
IRConst_U64(guest_RIP_curr_instr),
OFFB_RIP
)
);
}
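/* Illustrative sketch, not part of the patch itself: the new test
   relies on VexAbiInfo carrying a Bool field
   guest_amd64_sigbus_on_misalign, defaulting to False, which a port
   switches on when its kernel delivers SIGBUS for misaligned SSE
   accesses.  A plausible initialization in the embedder (the exact
   site and the platform guard shown are assumptions):

      VexAbiInfo vbi;
      LibVEX_default_VexAbiInfo(&vbi);            // reset all fields
      #if defined(VGP_amd64_freebsd)
      vbi.guest_amd64_sigbus_on_misalign = True;  // FreeBSD: SIGBUS
      #endif
*/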
-static void gen_SIGNAL_if_not_16_aligned ( IRTemp effective_addr ) {
- gen_SIGNAL_if_not_XX_aligned(effective_addr, 16-1);
+static void gen_SIGNAL_if_not_16_aligned ( const VexAbiInfo* vbi,
+ IRTemp effective_addr ) {
+ gen_SIGNAL_if_not_XX_aligned(vbi, effective_addr, 16-1);
}
-static void gen_SIGNAL_if_not_32_aligned ( IRTemp effective_addr ) {
- gen_SIGNAL_if_not_XX_aligned(effective_addr, 32-1);
+static void gen_SIGNAL_if_not_32_aligned ( const VexAbiInfo* vbi,
+ IRTemp effective_addr ) {
+ gen_SIGNAL_if_not_XX_aligned(vbi, effective_addr, 32-1);
}
-static void gen_SIGNAL_if_not_64_aligned ( IRTemp effective_addr ) {
- gen_SIGNAL_if_not_XX_aligned(effective_addr, 64-1);
+static void gen_SIGNAL_if_not_64_aligned ( const VexAbiInfo* vbi,
+ IRTemp effective_addr ) {
+ gen_SIGNAL_if_not_XX_aligned(vbi, effective_addr, 64-1);
}
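/* Note: the mask passed above is (alignment - 1), so the side exit
   fires exactly when (effective_addr & (alignment - 1)) != 0.  For
   the 16-byte case the mask is 0xF: an address such as 0x1008 takes
   the signal, while 0x1010 passes. */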
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_64_aligned(addr);
+ gen_SIGNAL_if_not_64_aligned(vbi, addr);
DIP("%sxsave %s\n", sz==8 ? "rex64/" : "", dis_buf);
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
DIP("%sfxsave %s\n", sz==8 ? "rex64/" : "", dis_buf);
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_64_aligned(addr);
+ gen_SIGNAL_if_not_64_aligned(vbi, addr);
DIP("%sxrstor %s\n", sz==8 ? "rex64/" : "", dis_buf);
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
DIP("%sfxrstor %s\n", sz==8 ? "rex64/" : "", dis_buf);
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
putXMMReg( gregOfRexRM(pfx,modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("movapd %s,%s\n", dis_buf,
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
putXMMReg( gregOfRexRM(pfx,modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("movaps %s,%s\n", dis_buf,
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movaps %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
dis_buf );
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movapd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
dis_buf );
modrm = getUChar(delta);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movntp%s %s,%s\n", sz==2 ? "d" : "s",
dis_buf,
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
putXMMReg( gregOfRexRM(pfx,modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("movdqa %s,%s\n", dis_buf,
nameXMMReg(eregOfRexRM(pfx,modrm)));
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
delta += alen;
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movdqa %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf);
modrm = getUChar(delta);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
DIP("movntdq %s,%s\n", dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
if (!isAvx)
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
DIP("%smovs%cdup %s,%s\n",
isAvx ? "v" : "", isL ? 'l' : 'h', dis_buf, nameXMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
if (!isAvx)
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
DIP("%sph%s %s,%s\n", isAvx ? "v" : "", str,
dis_buf, nameXMMReg(rG));
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
delta += alen;
DIP("pshufb %s,%s\n", dis_buf,
DIP("pmaddubsw %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
delta += alen;
DIP("pmaddubsw %s,%s\n", dis_buf, nameXMMReg(rG));
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
delta += alen;
DIP("psign%s %s,%s\n", str, dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
delta += alen;
DIP("pmulhrsw %s,%s\n", dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
delta += alen;
DIP("pabs%s %s,%s\n", str, dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
d64 = (Long)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
if (!isAvx)
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign(vecE, loadLE( Ity_V128, mkexpr(addr) ));
delta += alen;
DIP( "%s%stest%s %s,%s\n",
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
if (!isAvx)
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
delta += alen;
DIP("%sphminposuw %s,%s\n", mbV, dis_buf, nameXMMReg(rG));
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign(vecE, loadLE( Ity_V128, mkexpr(addr) ));
delta += alen;
DIP( "%s %s,%s\n", nm,
modrm = getUChar(delta);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
putXMMReg( gregOfRexRM(pfx,modrm),
loadLE(Ity_V128, mkexpr(addr)) );
DIP("movntdqa %s,%s\n", dis_buf,
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( argL, loadLE( Ity_V128, mkexpr(addr) ));
delta += alen;
DIP( "packusdw %s,%s\n",
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( argL, loadLE( Ity_V128, mkexpr(addr) ));
delta += alen;
DIP( "pmulld %s,%s\n",
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
assign( src0, loadLE(Ity_F32,
binop(Iop_Add64, mkexpr(addr), mkU64(0) )));
assign( src1, loadLE(Ity_F32,
nameXMMReg( gregOfRexRM(pfx, modrm) ) );
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
assign( src0, loadLE(Ity_F64,
binop(Iop_Add64, mkexpr(addr), mkU64(0) )));
assign( src1, loadLE(Ity_F64,
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
1/* imm8 is 1 byte after the amode */ );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
assign( svec, loadLE( Ity_V128, mkexpr(addr) ) );
imm8 = (Int)getUChar(delta+alen);
delta += alen+1;
/* cmpxchg16b requires an alignment check. */
if (sz == 8)
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
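/* (sz == 8 here means REX.W is present, i.e. CMPXCHG16B rather than
   CMPXCHG8B; only the 16-byte form requires its memory operand to be
   16-aligned.) */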
/* Get the expected and new values. */
assign( expdHi64, getIReg64(R_RDX) );
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) );
DIP("vmovapd %s,%s\n", dis_buf, nameXMMReg(rG));
delta += alen;
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_32_aligned( addr );
+ gen_SIGNAL_if_not_32_aligned( vbi, addr );
putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) );
DIP("vmovapd %s,%s\n", dis_buf, nameYMMReg(rG));
delta += alen;
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) );
DIP("vmovaps %s,%s\n", dis_buf, nameXMMReg(rG));
delta += alen;
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_32_aligned( addr );
+ gen_SIGNAL_if_not_32_aligned( vbi, addr );
putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) );
DIP("vmovaps %s,%s\n", dis_buf, nameYMMReg(rG));
delta += alen;
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(rG) );
DIP("vmovapd %s,%s\n", nameXMMReg(rG), dis_buf );
delta += alen;
delta += 1;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_32_aligned( addr );
+ gen_SIGNAL_if_not_32_aligned( vbi, addr );
storeLE( mkexpr(addr), getYMMReg(rG) );
DIP("vmovapd %s,%s\n", nameYMMReg(rG), dis_buf );
delta += alen;
goto decode_success;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(rG) );
DIP("vmovaps %s,%s\n", nameXMMReg(rG), dis_buf );
delta += alen;
goto decode_success;
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_32_aligned( addr );
+ gen_SIGNAL_if_not_32_aligned( vbi, addr );
storeLE( mkexpr(addr), getYMMReg(rG) );
DIP("vmovaps %s,%s\n", nameYMMReg(rG), dis_buf );
delta += alen;
assign(tS, getXMMReg(rS));
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
storeLE(mkexpr(addr), mkexpr(tS));
DIP("vmovntp%c %s,%s\n", have66(pfx) ? 'd' : 's',
nameXMMReg(rS), dis_buf);
assign(tS, getYMMReg(rS));
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_32_aligned(addr);
+ gen_SIGNAL_if_not_32_aligned(vbi, addr);
storeLE(mkexpr(addr), mkexpr(tS));
DIP("vmovntp%c %s,%s\n", have66(pfx) ? 'd' : 's',
nameYMMReg(rS), dis_buf);
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
if (isA)
- gen_SIGNAL_if_not_32_aligned(addr);
+ gen_SIGNAL_if_not_32_aligned(vbi, addr);
assign(tD, loadLE(Ity_V256, mkexpr(addr)));
DIP("vmovdq%c %s,%s\n", ch, dis_buf, nameYMMReg(rD));
}
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
if (isA)
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
assign(tD, loadLE(Ity_V128, mkexpr(addr)));
DIP("vmovdq%c %s,%s\n", ch, dis_buf, nameXMMReg(rD));
}
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
if (isA)
- gen_SIGNAL_if_not_32_aligned(addr);
+ gen_SIGNAL_if_not_32_aligned(vbi, addr);
storeLE(mkexpr(addr), mkexpr(tS));
DIP("vmovdq%c %s,%s\n", ch, nameYMMReg(rS), dis_buf);
}
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
if (isA)
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
storeLE(mkexpr(addr), mkexpr(tS));
DIP("vmovdq%c %s,%s\n", ch, nameXMMReg(rS), dis_buf);
}
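/* (In the four vmovdq hunks above, isA selects the aligned variant:
   vmovdqa must take the alignment check, while vmovdqu must tolerate
   misaligned addresses and so skips it.) */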
UInt rG = gregOfRexRM(pfx,modrm);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_16_aligned( addr );
+ gen_SIGNAL_if_not_16_aligned( vbi, addr );
storeLE( mkexpr(addr), getXMMReg(rG) );
DIP("vmovntdq %s,%s\n", dis_buf, nameXMMReg(rG));
delta += alen;
UInt rG = gregOfRexRM(pfx,modrm);
if (!epartIsReg(modrm)) {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
- gen_SIGNAL_if_not_32_aligned( addr );
+ gen_SIGNAL_if_not_32_aligned( vbi, addr );
storeLE( mkexpr(addr), getYMMReg(rG) );
DIP("vmovntdq %s,%s\n", dis_buf, nameYMMReg(rG));
delta += alen;
IRTemp tD = newTemp(Ity_V128);
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_16_aligned(addr);
+ gen_SIGNAL_if_not_16_aligned(vbi, addr);
assign(tD, loadLE(Ity_V128, mkexpr(addr)));
DIP("vmovntdqa %s,%s\n", dis_buf, nameXMMReg(rD));
putYMMRegLoAndZU(rD, mkexpr(tD));
IRTemp tD = newTemp(Ity_V256);
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
delta += alen;
- gen_SIGNAL_if_not_32_aligned(addr);
+ gen_SIGNAL_if_not_32_aligned(vbi, addr);
assign(tD, loadLE(Ity_V256, mkexpr(addr)));
DIP("vmovntdqa %s,%s\n", dis_buf, nameYMMReg(rD));
putYMMReg(rD, mkexpr(tD));