From 33012dd82ba5f3502d3c6c5d95cf22b7af48f6b3 Mon Sep 17 00:00:00 2001 From: Paul Floyd Date: Sun, 3 Oct 2021 16:06:07 +0200 Subject: [PATCH] FreeBSD support, patch 1 Primarily these changes concern the nature of alignment generated signals. On Linux, these produce SIGSEGV and on FreeBSD they produce SIGBUS. --- VEX/priv/guest_amd64_toIR.c | 131 +++++++++++++++++++----------------- VEX/priv/guest_ppc_toIR.c | 2 +- VEX/priv/host_amd64_defs.c | 1 + VEX/priv/host_amd64_isel.c | 2 + VEX/priv/main_globals.c | 2 +- VEX/pub/libvex.h | 2 +- 6 files changed, 74 insertions(+), 66 deletions(-) diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c index c6296f3987..c7f94b15b5 100644 --- a/VEX/priv/guest_amd64_toIR.c +++ b/VEX/priv/guest_amd64_toIR.c @@ -10132,38 +10132,43 @@ static IRTemp math_PALIGNR_XMM ( IRTemp sV, IRTemp dV, UInt imm8 ) return res; } - /* Generate a SIGSEGV followed by a restart of the current instruction if effective_addr is not 16-aligned. This is required behaviour for some SSE3 instructions and all 128-bit SSSE3 instructions. - This assumes that guest_RIP_curr_instr is set correctly! */ + This assumes that guest_RIP_curr_instr is set correctly! + On FreeBSD, this kind of error generates a SIGBUS. 
*/ static -void gen_SEGV_if_not_XX_aligned ( IRTemp effective_addr, ULong mask ) +void gen_SIGNAL_if_not_XX_aligned ( IRTemp effective_addr, ULong mask ) { stmt( IRStmt_Exit( binop(Iop_CmpNE64, binop(Iop_And64,mkexpr(effective_addr),mkU64(mask)), mkU64(0)), +#if defined(VGO_freebsd) + Ijk_SigBUS, +#else Ijk_SigSEGV, +#endif IRConst_U64(guest_RIP_curr_instr), OFFB_RIP ) ); } -static void gen_SEGV_if_not_16_aligned ( IRTemp effective_addr ) { - gen_SEGV_if_not_XX_aligned(effective_addr, 16-1); +static void gen_SIGNAL_if_not_16_aligned ( IRTemp effective_addr ) { + gen_SIGNAL_if_not_XX_aligned(effective_addr, 16-1); } -static void gen_SEGV_if_not_32_aligned ( IRTemp effective_addr ) { - gen_SEGV_if_not_XX_aligned(effective_addr, 32-1); +static void gen_SIGNAL_if_not_32_aligned ( IRTemp effective_addr ) { + gen_SIGNAL_if_not_XX_aligned(effective_addr, 32-1); } -static void gen_SEGV_if_not_64_aligned ( IRTemp effective_addr ) { - gen_SEGV_if_not_XX_aligned(effective_addr, 64-1); +static void gen_SIGNAL_if_not_64_aligned ( IRTemp effective_addr ) { + gen_SIGNAL_if_not_XX_aligned(effective_addr, 64-1); } + /* Helper for deciding whether a given insn (starting at the opcode byte) may validly be used with a LOCK prefix. The following insns may be used with LOCK when their destination operand is in memory. @@ -11916,7 +11921,7 @@ static Long dis_XSAVE ( const VexAbiInfo* vbi, addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_64_aligned(addr); + gen_SIGNAL_if_not_64_aligned(addr); DIP("%sxsave %s\n", sz==8 ? "rex64/" : "", dis_buf); @@ -11960,7 +11965,7 @@ static Long dis_FXSAVE ( const VexAbiInfo* vbi, addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); DIP("%sfxsave %s\n", sz==8 ? 
"rex64/" : "", dis_buf); @@ -12171,7 +12176,7 @@ static Long dis_XRSTOR ( const VexAbiInfo* vbi, addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_64_aligned(addr); + gen_SIGNAL_if_not_64_aligned(addr); DIP("%sxrstor %s\n", sz==8 ? "rex64/" : "", dis_buf); @@ -12241,7 +12246,7 @@ static Long dis_FXRSTOR ( const VexAbiInfo* vbi, addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); DIP("%sfxrstor %s\n", sz==8 ? "rex64/" : "", dis_buf); @@ -12947,7 +12952,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); putXMMReg( gregOfRexRM(pfx,modrm), loadLE(Ity_V128, mkexpr(addr)) ); DIP("movapd %s,%s\n", dis_buf, @@ -12968,7 +12973,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); putXMMReg( gregOfRexRM(pfx,modrm), loadLE(Ity_V128, mkexpr(addr)) ); DIP("movaps %s,%s\n", dis_buf, @@ -12992,7 +12997,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) ); DIP("movaps %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf ); @@ -13012,7 +13017,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) ); DIP("movapd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf ); @@ -13210,7 +13215,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, modrm = getUChar(delta); if 
(!epartIsReg(modrm)) { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) ); DIP("movntp%s %s,%s\n", sz==2 ? "d" : "s", dis_buf, @@ -13955,7 +13960,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); putXMMReg( gregOfRexRM(pfx,modrm), loadLE(Ity_V128, mkexpr(addr)) ); DIP("movdqa %s,%s\n", dis_buf, @@ -14255,7 +14260,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, nameXMMReg(eregOfRexRM(pfx,modrm))); } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); delta += alen; storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) ); DIP("movdqa %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf); @@ -14936,7 +14941,7 @@ Long dis_ESC_0F__SSE2 ( Bool* decode_OK, modrm = getUChar(delta); if (!epartIsReg(modrm)) { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) ); DIP("movntdq %s,%s\n", dis_buf, nameXMMReg(gregOfRexRM(pfx,modrm))); @@ -15363,7 +15368,7 @@ static Long dis_MOVSxDUP_128 ( const VexAbiInfo* vbi, Prefix pfx, } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); if (!isAvx) - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); DIP("%smovs%cdup %s,%s\n", isAvx ? "v" : "", isL ? 
'l' : 'h', dis_buf, nameXMMReg(rG)); @@ -15718,7 +15723,7 @@ static Long dis_PHADD_128 ( const VexAbiInfo* vbi, Prefix pfx, Long delta, } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); if (!isAvx) - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); DIP("%sph%s %s,%s\n", isAvx ? "v" : "", str, dis_buf, nameXMMReg(rG)); @@ -15878,7 +15883,7 @@ Long dis_ESC_0F38__SupSSE3 ( Bool* decode_OK, nameXMMReg(gregOfRexRM(pfx,modrm))); } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); delta += alen; DIP("pshufb %s,%s\n", dis_buf, @@ -16030,7 +16035,7 @@ Long dis_ESC_0F38__SupSSE3 ( Bool* decode_OK, DIP("pmaddubsw %s,%s\n", nameXMMReg(rE), nameXMMReg(rG)); } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); delta += alen; DIP("pmaddubsw %s,%s\n", dis_buf, nameXMMReg(rG)); @@ -16125,7 +16130,7 @@ Long dis_ESC_0F38__SupSSE3 ( Bool* decode_OK, nameXMMReg(gregOfRexRM(pfx,modrm))); } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); delta += alen; DIP("psign%s %s,%s\n", str, dis_buf, @@ -16209,7 +16214,7 @@ Long dis_ESC_0F38__SupSSE3 ( Bool* decode_OK, nameXMMReg(gregOfRexRM(pfx,modrm))); } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); delta += alen; DIP("pmulhrsw %s,%s\n", dis_buf, @@ -16288,7 +16293,7 @@ Long dis_ESC_0F38__SupSSE3 ( Bool* decode_OK, nameXMMReg(gregOfRexRM(pfx,modrm))); } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - 
gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); delta += alen; DIP("pabs%s %s,%s\n", str, dis_buf, @@ -16395,7 +16400,7 @@ Long dis_ESC_0F3A__SupSSE3 ( Bool* decode_OK, nameXMMReg(gregOfRexRM(pfx,modrm))); } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); d64 = (Long)getUChar(delta+alen); delta += alen+1; @@ -16940,7 +16945,7 @@ static Long dis_xTESTy_128 ( const VexAbiInfo* vbi, Prefix pfx, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); if (!isAvx) - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign(vecE, loadLE( Ity_V128, mkexpr(addr) )); delta += alen; DIP( "%s%stest%s %s,%s\n", @@ -17742,7 +17747,7 @@ static Long dis_PHMINPOSUW_128 ( const VexAbiInfo* vbi, Prefix pfx, } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); if (!isAvx) - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); assign( sV, loadLE(Ity_V128, mkexpr(addr)) ); delta += alen; DIP("%sphminposuw %s,%s\n", mbV, dis_buf, nameXMMReg(rG)); @@ -17976,7 +17981,7 @@ Long dis_ESC_0F38__SSE4 ( Bool* decode_OK, nameXMMReg( gregOfRexRM(pfx, modrm) ) ); } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign(vecE, loadLE( Ity_V128, mkexpr(addr) )); delta += alen; DIP( "%s %s,%s\n", nm, @@ -18109,7 +18114,7 @@ Long dis_ESC_0F38__SSE4 ( Bool* decode_OK, modrm = getUChar(delta); if (!epartIsReg(modrm)) { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); putXMMReg( gregOfRexRM(pfx,modrm), loadLE(Ity_V128, mkexpr(addr)) ); DIP("movntdqa %s,%s\n", dis_buf, @@ -18138,7 +18143,7 @@ Long dis_ESC_0F38__SSE4 ( Bool* decode_OK, nameXMMReg( 
gregOfRexRM(pfx, modrm) ) ); } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( argL, loadLE( Ity_V128, mkexpr(addr) )); delta += alen; DIP( "packusdw %s,%s\n", @@ -18321,7 +18326,7 @@ Long dis_ESC_0F38__SSE4 ( Bool* decode_OK, nameXMMReg( gregOfRexRM(pfx, modrm) ) ); } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( argL, loadLE( Ity_V128, mkexpr(addr) )); delta += alen; DIP( "pmulld %s,%s\n", @@ -19292,7 +19297,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, nameXMMReg( gregOfRexRM(pfx, modrm) ) ); } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 ); - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); assign( src0, loadLE(Ity_F32, binop(Iop_Add64, mkexpr(addr), mkU64(0) ))); assign( src1, loadLE(Ity_F32, @@ -19354,7 +19359,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, nameXMMReg( gregOfRexRM(pfx, modrm) ) ); } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 ); - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); assign( src0, loadLE(Ity_F64, binop(Iop_Add64, mkexpr(addr), mkU64(0) ))); assign( src1, loadLE(Ity_F64, @@ -19459,7 +19464,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ -19495,7 +19500,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ 
-19532,7 +19537,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ -19754,7 +19759,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ -19787,7 +19792,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ -19823,7 +19828,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ -19860,7 +19865,7 @@ Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK, } else { addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1/* imm8 is 1 byte after the amode */ ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); assign( svec, loadLE( Ity_V128, mkexpr(addr) ) ); imm8 = (Int)getUChar(delta+alen); delta += alen+1; @@ -22359,7 +22364,7 @@ Long dis_ESC_0F ( /* cmpxchg16b requires an alignment check. */ if (sz == 8) - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); /* Get the expected and new values. 
*/ assign( expdHi64, getIReg64(R_RDX) ); @@ -24764,7 +24769,7 @@ Long dis_ESC_0F__VEX ( delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) ); DIP("vmovapd %s,%s\n", dis_buf, nameXMMReg(rG)); delta += alen; @@ -24782,7 +24787,7 @@ Long dis_ESC_0F__VEX ( delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_32_aligned( addr ); + gen_SIGNAL_if_not_32_aligned( addr ); putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) ); DIP("vmovapd %s,%s\n", dis_buf, nameYMMReg(rG)); delta += alen; @@ -24800,7 +24805,7 @@ Long dis_ESC_0F__VEX ( delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) ); DIP("vmovaps %s,%s\n", dis_buf, nameXMMReg(rG)); delta += alen; @@ -24818,7 +24823,7 @@ Long dis_ESC_0F__VEX ( delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_32_aligned( addr ); + gen_SIGNAL_if_not_32_aligned( addr ); putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) ); DIP("vmovaps %s,%s\n", dis_buf, nameYMMReg(rG)); delta += alen; @@ -24839,7 +24844,7 @@ Long dis_ESC_0F__VEX ( delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(rG) ); DIP("vmovapd %s,%s\n", nameXMMReg(rG), dis_buf ); delta += alen; @@ -24857,7 +24862,7 @@ Long dis_ESC_0F__VEX ( delta += 1; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_32_aligned( addr ); + gen_SIGNAL_if_not_32_aligned( addr ); storeLE( mkexpr(addr), getYMMReg(rG) ); DIP("vmovapd %s,%s\n", nameYMMReg(rG), dis_buf ); delta += alen; @@ -24876,7 +24881,7 @@ Long dis_ESC_0F__VEX ( goto decode_success; } 
else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(rG) ); DIP("vmovaps %s,%s\n", nameXMMReg(rG), dis_buf ); delta += alen; @@ -24895,7 +24900,7 @@ Long dis_ESC_0F__VEX ( goto decode_success; } else { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_32_aligned( addr ); + gen_SIGNAL_if_not_32_aligned( addr ); storeLE( mkexpr(addr), getYMMReg(rG) ); DIP("vmovaps %s,%s\n", nameYMMReg(rG), dis_buf ); delta += alen; @@ -25034,7 +25039,7 @@ Long dis_ESC_0F__VEX ( assign(tS, getXMMReg(rS)); addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); storeLE(mkexpr(addr), mkexpr(tS)); DIP("vmovntp%c %s,%s\n", have66(pfx) ? 'd' : 's', nameXMMReg(rS), dis_buf); @@ -25050,7 +25055,7 @@ Long dis_ESC_0F__VEX ( assign(tS, getYMMReg(rS)); addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_32_aligned(addr); + gen_SIGNAL_if_not_32_aligned(addr); storeLE(mkexpr(addr), mkexpr(tS)); DIP("vmovntp%c %s,%s\n", have66(pfx) ? 
'd' : 's', nameYMMReg(rS), dis_buf); @@ -26041,7 +26046,7 @@ Long dis_ESC_0F__VEX ( addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; if (isA) - gen_SEGV_if_not_32_aligned(addr); + gen_SIGNAL_if_not_32_aligned(addr); assign(tD, loadLE(Ity_V256, mkexpr(addr))); DIP("vmovdq%c %s,%s\n", ch, dis_buf, nameYMMReg(rD)); } @@ -26066,7 +26071,7 @@ Long dis_ESC_0F__VEX ( addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; if (isA) - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); assign(tD, loadLE(Ity_V128, mkexpr(addr))); DIP("vmovdq%c %s,%s\n", ch, dis_buf, nameXMMReg(rD)); } @@ -26604,7 +26609,7 @@ Long dis_ESC_0F__VEX ( addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; if (isA) - gen_SEGV_if_not_32_aligned(addr); + gen_SIGNAL_if_not_32_aligned(addr); storeLE(mkexpr(addr), mkexpr(tS)); DIP("vmovdq%c %s,%s\n", ch, nameYMMReg(rS), dis_buf); } @@ -26629,7 +26634,7 @@ Long dis_ESC_0F__VEX ( addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; if (isA) - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); storeLE(mkexpr(addr), mkexpr(tS)); DIP("vmovdq%c %s,%s\n", ch, nameXMMReg(rS), dis_buf); } @@ -27301,7 +27306,7 @@ Long dis_ESC_0F__VEX ( UInt rG = gregOfRexRM(pfx,modrm); if (!epartIsReg(modrm)) { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_16_aligned( addr ); + gen_SIGNAL_if_not_16_aligned( addr ); storeLE( mkexpr(addr), getXMMReg(rG) ); DIP("vmovntdq %s,%s\n", dis_buf, nameXMMReg(rG)); delta += alen; @@ -27315,7 +27320,7 @@ Long dis_ESC_0F__VEX ( UInt rG = gregOfRexRM(pfx,modrm); if (!epartIsReg(modrm)) { addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 ); - gen_SEGV_if_not_32_aligned( addr ); + gen_SIGNAL_if_not_32_aligned( addr ); storeLE( mkexpr(addr), getYMMReg(rG) ); DIP("vmovntdq %s,%s\n", dis_buf, nameYMMReg(rG)); delta += alen; @@ -28936,7 +28941,7 @@ Long dis_ESC_0F38__VEX ( IRTemp tD = newTemp(Ity_V128); addr = 
disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_16_aligned(addr); + gen_SIGNAL_if_not_16_aligned(addr); assign(tD, loadLE(Ity_V128, mkexpr(addr))); DIP("vmovntdqa %s,%s\n", dis_buf, nameXMMReg(rD)); putYMMRegLoAndZU(rD, mkexpr(tD)); @@ -28950,7 +28955,7 @@ Long dis_ESC_0F38__VEX ( IRTemp tD = newTemp(Ity_V256); addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 ); delta += alen; - gen_SEGV_if_not_32_aligned(addr); + gen_SIGNAL_if_not_32_aligned(addr); assign(tD, loadLE(Ity_V256, mkexpr(addr))); DIP("vmovntdqa %s,%s\n", dis_buf, nameYMMReg(rD)); putYMMReg(rD, mkexpr(tD)); diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c index 5d9f6b84a2..d90d566ed1 100644 --- a/VEX/priv/guest_ppc_toIR.c +++ b/VEX/priv/guest_ppc_toIR.c @@ -4776,7 +4776,7 @@ static IRExpr * is_NaN_Vector( IRType element_size, IRExpr *src ) IRTemp zeroV128 = newTemp( Ity_V128 ); IRTemp exp_maskV128 = newTemp( Ity_V128 ); IRTemp frac_maskV128 = newTemp( Ity_V128 ); - IROp opCmpEQ; + IROp opCmpEQ = Iop_INVALID; assign( zeroV128, mkV128( 0 ) ); diff --git a/VEX/priv/host_amd64_defs.c b/VEX/priv/host_amd64_defs.c index 3d237e112d..69afab739c 100644 --- a/VEX/priv/host_amd64_defs.c +++ b/VEX/priv/host_amd64_defs.c @@ -3268,6 +3268,7 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc, case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break; case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break; case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; + case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break; case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break; /* We don't expect to see the following being assisted. 
*/ case Ijk_Ret: diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c index 3299c3df91..8687079c1a 100644 --- a/VEX/priv/host_amd64_isel.c +++ b/VEX/priv/host_amd64_isel.c @@ -5150,6 +5150,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt ) case Ijk_NoDecode: case Ijk_NoRedir: case Ijk_SigSEGV: + case Ijk_SigBUS: case Ijk_SigTRAP: case Ijk_Sys_syscall: case Ijk_Sys_int210: @@ -5246,6 +5247,7 @@ static void iselNext ( ISelEnv* env, case Ijk_NoDecode: case Ijk_NoRedir: case Ijk_SigSEGV: + case Ijk_SigBUS: case Ijk_SigTRAP: case Ijk_Sys_syscall: case Ijk_Sys_int210: diff --git a/VEX/priv/main_globals.c b/VEX/priv/main_globals.c index b2243f565c..9167e8131c 100644 --- a/VEX/priv/main_globals.c +++ b/VEX/priv/main_globals.c @@ -57,7 +57,7 @@ Int vex_debuglevel = 0; Int vex_traceflags = 0; /* Max # guest insns per bb */ -VexControl vex_control = { 0,0,False,0,0,0 }; +VexControl vex_control = { 0,0,VexRegUpd_INVALID,0,0,False,0 }; diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h index dcf022159b..8d911db440 100644 --- a/VEX/pub/libvex.h +++ b/VEX/pub/libvex.h @@ -635,7 +635,7 @@ typedef extern void LibVEX_Init ( /* failure exit function */ -# if __cplusplus == 1 && __GNUC__ && __GNUC__ <= 3 +# if defined(__cplusplus) && defined(__GNUC__) && __GNUC__ <= 3 /* g++ 3.x doesn't understand attributes on function parameters. See #265762. */ # else -- 2.47.2