/* Used by the optimiser to specialise calls to helpers. */
extern
-IRExpr* guest_amd64_spechelper ( HChar* function_name,
- IRExpr** args );
+IRExpr* guest_amd64_spechelper ( HChar* function_name,
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts );
/* Describes to the optimiser which part of the guest state require
precise memory exceptions. This is logically part of the guest
}
IRExpr* guest_amd64_spechelper ( HChar* function_name,
- IRExpr** args )
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts )
{
# define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
# define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
/* Used by the optimiser to specialise calls to helpers. */
extern
-IRExpr* guest_ppc32_spechelper ( HChar* function_name,
- IRExpr** args );
+IRExpr* guest_ppc32_spechelper ( HChar* function_name,
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts );
extern
-IRExpr* guest_ppc64_spechelper ( HChar* function_name,
- IRExpr** args );
+IRExpr* guest_ppc64_spechelper ( HChar* function_name,
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts );
/* Describes to the optimiser which part of the guest state require
precise memory exceptions. This is logically part of the guest
/* Helper-function specialiser. */
IRExpr* guest_ppc32_spechelper ( HChar* function_name,
- IRExpr** args )
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts )
{
return NULL;
}
IRExpr* guest_ppc64_spechelper ( HChar* function_name,
- IRExpr** args )
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts )
{
return NULL;
}
/* Used by the optimiser to specialise calls to helpers. */
extern
-IRExpr* guest_x86_spechelper ( HChar* function_name,
- IRExpr** args );
+IRExpr* guest_x86_spechelper ( HChar* function_name,
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts );
/* Describes to the optimiser which part of the guest state require
precise memory exceptions. This is logically part of the guest
&& e->Iex.Const.con->Ico.U32 == n );
}
-IRExpr* guest_x86_spechelper ( HChar* function_name,
- IRExpr** args )
+IRExpr* guest_x86_spechelper ( HChar* function_name,
+ IRExpr** args,
+ IRStmt** precedingStmts,
+ Int n_precedingStmts )
{
# define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
# define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
switch (op) {
case Iop_Xor8: return IRExpr_Const(IRConst_U8(0));
case Iop_Xor16: return IRExpr_Const(IRConst_U16(0));
+ case Iop_Sub32:
case Iop_Xor32: return IRExpr_Const(IRConst_U32(0));
case Iop_Xor64: return IRExpr_Const(IRConst_U64(0));
case Iop_XorV128: return IRExpr_Const(IRConst_V128(0));
}
/* Xor8/16/32/64/V128(t,t) ==> 0, for some IRTemp t */
+ /* Sub32(t,t) ==> 0, for some IRTemp t */
if ( (e->Iex.Binop.op == Iop_Xor64
|| e->Iex.Binop.op == Iop_Xor32
|| e->Iex.Binop.op == Iop_Xor16
|| e->Iex.Binop.op == Iop_Xor8
- || e->Iex.Binop.op == Iop_XorV128)
+ || e->Iex.Binop.op == Iop_XorV128
+ || e->Iex.Binop.op == Iop_Sub32)
&& sameIRTemps(e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
e2 = mkZeroForXor(e->Iex.Binop.op);
}
/*---------------------------------------------------------------*/
static
-IRSB* spec_helpers_BB ( IRSB* bb,
- IRExpr* (*specHelper) ( HChar*, IRExpr**) )
+IRSB* spec_helpers_BB(
+ IRSB* bb,
+ IRExpr* (*specHelper) (HChar*, IRExpr**, IRStmt**, Int)
+ )
{
Int i;
IRStmt* st;
continue;
ex = (*specHelper)( st->Ist.WrTmp.data->Iex.CCall.cee->name,
- st->Ist.WrTmp.data->Iex.CCall.args );
+ st->Ist.WrTmp.data->Iex.CCall.args,
+ &bb->stmts[0], i );
if (!ex)
/* the front end can't think of a suitable replacement */
continue;
static
IRSB* cheap_transformations (
IRSB* bb,
- IRExpr* (*specHelper) (HChar*, IRExpr**),
+ IRExpr* (*specHelper) (HChar*, IRExpr**, IRStmt**, Int),
Bool (*preciseMemExnsFn)(Int,Int)
)
{
*/
-IRSB* do_iropt_BB ( IRSB* bb0,
- IRExpr* (*specHelper) (HChar*, IRExpr**),
- Bool (*preciseMemExnsFn)(Int,Int),
- Addr64 guest_addr )
+IRSB* do_iropt_BB(
+ IRSB* bb0,
+ IRExpr* (*specHelper) (HChar*, IRExpr**, IRStmt**, Int),
+ Bool (*preciseMemExnsFn)(Int,Int),
+ Addr64 guest_addr,
+ VexArch guest_arch
+ )
{
static Int n_total = 0;
static Int n_expensive = 0;
bb = cheap_transformations( bb, specHelper, preciseMemExnsFn );
+ if (guest_arch == VexArchARM) {
+ /* Translating Thumb2 code produces a lot of chaff. We have to
+ work extra hard to get rid of it. */
+ bb = cprop_BB(bb);
+ bb = spec_helpers_BB ( bb, specHelper );
+ redundant_put_removal_BB ( bb, preciseMemExnsFn );
+ do_deadcode_BB( bb );
+ }
+
if (vex_control.iropt_level > 1) {
/* Peer at what we have, to decide how much more effort to throw
/* Top level optimiser entry point. Returns a new BB. Operates
under the control of the global "vex_control" struct. */
extern
-IRSB* do_iropt_BB ( IRSB* bb,
- IRExpr* (*specHelper) (HChar*, IRExpr**),
- Bool (*preciseMemExnsFn)(Int,Int),
- Addr64 guest_addr );
+IRSB* do_iropt_BB(
+ IRSB* bb,
+ IRExpr* (*specHelper) (HChar*, IRExpr**, IRStmt**, Int),
+ Bool (*preciseMemExnsFn)(Int,Int),
+ Addr64 guest_addr,
+ VexArch guest_arch
+ );
/* Do a constant folding/propagation pass. */
extern