//------------------------------------------------------------
// Primary data structure #2: InstrInfo table
// - Holds the cached info about each instr that is used for simulation.
-// - table(BB_start_addr, list(InstrInfo))
-// - For each BB, each InstrInfo in the list holds info about the
+// - table(SB_start_addr, list(InstrInfo))
+// - For each SB, each InstrInfo in the list holds info about the
// instruction (instrLen, instrAddr, etc), plus a pointer to its line
// CC. This node is what's passed to the simulation function.
-// - When BBs are discarded the relevant list(instr_details) is freed.
+// - When SBs are discarded the relevant list(instr_details) is freed.
typedef struct _InstrInfo InstrInfo;
struct _InstrInfo {
LineCC* parent; // parent line-CC
};
-typedef struct _BB_info BB_info;
-struct _BB_info {
- Addr BB_addr; // key; MUST BE FIRST
+typedef struct _SB_info SB_info;
+struct _SB_info {
+ Addr SB_addr; // key; MUST BE FIRST
Int n_instrs;
InstrInfo instrs[0];
};
Int events_used;
-   /* The array of InstrInfo bins for the BB. */
+   /* The array of InstrInfo bins for the SB. */
- BB_info* bbInfo;
+ SB_info* sbInfo;
/* Number InstrInfo bins 'used' so far. */
- Int bbInfo_i;
+ Int sbInfo_i;
- /* The output BB being constructed. */
- IRBB* bbOut;
+ /* The output SB being constructed. */
+ IRSB* sbOut;
}
CgState;
// Note that origAddr is the real origAddr, not the address of the first
// instruction in the block (they can be different due to redirection).
static
-BB_info* get_BB_info(IRBB* bbIn, Addr origAddr)
+SB_info* get_SB_info(IRSB* sbIn, Addr origAddr)
{
Int i, n_instrs;
IRStmt* st;
- BB_info* bbInfo;
+ SB_info* sbInfo;
- // Count number of original instrs in BB
+ // Count number of original instrs in SB
n_instrs = 0;
- for (i = 0; i < bbIn->stmts_used; i++) {
- st = bbIn->stmts[i];
+ for (i = 0; i < sbIn->stmts_used; i++) {
+ st = sbIn->stmts[i];
if (Ist_IMark == st->tag) n_instrs++;
}
// If this assertion fails, there has been some screwup: some
// translations must have been discarded but Cachegrind hasn't discarded
// the corresponding entries in the instr-info table.
- bbInfo = VG_(OSet_Lookup)(instrInfoTable, &origAddr);
- tl_assert(NULL == bbInfo);
+ sbInfo = VG_(OSet_Lookup)(instrInfoTable, &origAddr);
+ tl_assert(NULL == sbInfo);
-   // BB never translated before (at this address, at least; could have
+   // SB never translated before (at this address, at least; could have
// been unloaded and then reloaded elsewhere in memory)
- bbInfo = VG_(OSet_AllocNode)(instrInfoTable,
- sizeof(BB_info) + n_instrs*sizeof(InstrInfo));
- bbInfo->BB_addr = origAddr;
- bbInfo->n_instrs = n_instrs;
- VG_(OSet_Insert)( instrInfoTable, bbInfo );
+ sbInfo = VG_(OSet_AllocNode)(instrInfoTable,
+ sizeof(SB_info) + n_instrs*sizeof(InstrInfo));
+ sbInfo->SB_addr = origAddr;
+ sbInfo->n_instrs = n_instrs;
+ VG_(OSet_Insert)( instrInfoTable, sbInfo );
distinct_instrs++;
- return bbInfo;
+ return sbInfo;
}
InstrInfo* setup_InstrInfo ( CgState* cgs, Addr instr_addr, UInt instr_len )
{
InstrInfo* i_node;
- tl_assert(cgs->bbInfo_i >= 0);
- tl_assert(cgs->bbInfo_i < cgs->bbInfo->n_instrs);
- i_node = &cgs->bbInfo->instrs[ cgs->bbInfo_i ];
+ tl_assert(cgs->sbInfo_i >= 0);
+ tl_assert(cgs->sbInfo_i < cgs->sbInfo->n_instrs);
+ i_node = &cgs->sbInfo->instrs[ cgs->sbInfo_i ];
i_node->instr_addr = instr_addr;
i_node->instr_len = instr_len;
i_node->parent = get_lineCC(instr_addr);
- cgs->bbInfo_i++;
+ cgs->sbInfo_i++;
return i_node;
}
/* Generate code for all outstanding memory events, and mark the queue
-   empty.  Code is generated into cgs->bbOut, and this activity
+   empty.  Code is generated into cgs->sbOut, and this activity
- 'consumes' slots in cgs->bbInfo. */
+ 'consumes' slots in cgs->sbInfo. */
static void flushEvents ( CgState* cgs )
{
di = unsafeIRDirty_0_N( regparms,
helperName, VG_(fnptr_to_fnentry)( helperAddr ),
argv );
- addStmtToIRBB( cgs->bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( cgs->sbOut, IRStmt_Dirty(di) );
}
cgs->events_used = 0;
static
-IRBB* cg_instrument ( VgCallbackClosure* closure,
- IRBB* bbIn,
+IRSB* cg_instrument ( VgCallbackClosure* closure,
+ IRSB* sbIn,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
IRStmt* st;
Addr64 cia; /* address of current insn */
CgState cgs;
- IRTypeEnv* tyenv = bbIn->tyenv;
+ IRTypeEnv* tyenv = sbIn->tyenv;
InstrInfo* curr_inode = NULL;
if (gWordTy != hWordTy) {
VG_(tool_panic)("host/guest word size mismatch");
}
- // Set up new BB
- cgs.bbOut = dopyIRBBExceptStmts(bbIn);
+ // Set up new SB
+ cgs.sbOut = deepCopyIRSBExceptStmts(sbIn);
// Copy verbatim any IR preamble preceding the first IMark
i = 0;
- while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
- addStmtToIRBB( cgs.bbOut, bbIn->stmts[i] );
+ while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
+ addStmtToIRSB( cgs.sbOut, sbIn->stmts[i] );
i++;
}
// Get the first statement, and initial cia from it
- tl_assert(bbIn->stmts_used > 0);
- tl_assert(i < bbIn->stmts_used);
- st = bbIn->stmts[i];
+ tl_assert(sbIn->stmts_used > 0);
+ tl_assert(i < sbIn->stmts_used);
+ st = sbIn->stmts[i];
tl_assert(Ist_IMark == st->tag);
cia = st->Ist.IMark.addr;
// Set up running state and get block info
tl_assert(closure->readdr == vge->base[0]);
cgs.events_used = 0;
- cgs.bbInfo = get_BB_info(bbIn, (Addr)closure->readdr);
- cgs.bbInfo_i = 0;
+ cgs.sbInfo = get_SB_info(sbIn, (Addr)closure->readdr);
+ cgs.sbInfo_i = 0;
if (DEBUG_CG)
VG_(printf)("\n\n---------- cg_instrument ----------\n");
// Traverse the block, initialising inodes, adding events and flushing as
// necessary.
- for (/*use current i*/; i < bbIn->stmts_used; i++) {
+ for (/*use current i*/; i < sbIn->stmts_used; i++) {
- st = bbIn->stmts[i];
+ st = sbIn->stmts[i];
tl_assert(isFlatIRStmt(st));
switch (st->tag) {
addEvent_Ir( &cgs, curr_inode );
break;
- case Ist_Tmp: {
- IRExpr* data = st->Ist.Tmp.data;
+ case Ist_WrTmp: {
+ IRExpr* data = st->Ist.WrTmp.data;
if (data->tag == Iex_Load) {
IRExpr* aexpr = data->Iex.Load.addr;
// Note also, endianness info is ignored. I guess
}
/* Copy the original statement */
- addStmtToIRBB( cgs.bbOut, st );
+ addStmtToIRSB( cgs.sbOut, st );
if (DEBUG_CG) {
ppIRStmt(st);
flushEvents( &cgs );
/* done. stay sane ... */
- tl_assert(cgs.bbInfo_i == cgs.bbInfo->n_instrs);
+ tl_assert(cgs.sbInfo_i == cgs.sbInfo->n_instrs);
if (DEBUG_CG) {
VG_(printf)( "goto {");
- ppIRJumpKind(bbIn->jumpkind);
+ ppIRJumpKind(sbIn->jumpkind);
VG_(printf)( "} ");
- ppIRExpr( bbIn->next );
+ ppIRExpr( sbIn->next );
VG_(printf)( "}\n");
}
- return cgs.bbOut;
+ return cgs.sbOut;
}
/*------------------------------------------------------------*/
// any reason at all: to free up space, because the guest code was
// unmapped or modified, or for any arbitrary reason.
static
-void cg_discard_basic_block_info ( Addr64 orig_addr64, VexGuestExtents vge )
+void cg_discard_superblock_info ( Addr64 orig_addr64, VexGuestExtents vge )
{
- BB_info* bbInfo;
+ SB_info* sbInfo;
Addr orig_addr = (Addr)vge.base[0];
tl_assert(vge.n_used > 0);
-   // Get BB info, remove from table, free BB info.  Simple!  Note that we
+   // Get SB info, remove from table, free SB info.  Simple!  Note that we
// use orig_addr, not the first instruction address in vge.
- bbInfo = VG_(OSet_Remove)(instrInfoTable, &orig_addr);
- tl_assert(NULL != bbInfo);
- VG_(OSet_FreeNode)(instrInfoTable, bbInfo);
+ sbInfo = VG_(OSet_Remove)(instrInfoTable, &orig_addr);
+ tl_assert(NULL != sbInfo);
+ VG_(OSet_FreeNode)(instrInfoTable, sbInfo);
}
/*--------------------------------------------------------------------*/
cg_instrument,
cg_fini);
- VG_(needs_basic_block_discards)(cg_discard_basic_block_info);
+ VG_(needs_superblock_discards)(cg_discard_superblock_info);
VG_(needs_command_line_options)(cg_process_cmd_line_option,
cg_print_usage,
cg_print_debug_usage);
- * bbIn==0 is possible for artifical BB without real code.
+ * bbIn==0 is possible for artificial BB without real code.
* Such a BB is created when returning to an unknown function.
*/
-BB* CLG_(get_bb)(Addr addr, IRBB* bbIn, /*OUT*/ Bool *seen_before)
+BB* CLG_(get_bb)(Addr addr, IRSB* bbIn, /*OUT*/ Bool *seen_before)
{
BB* bb;
obj_node* obj;
/* from main.c */
Bool CLG_(get_debug_info)(Addr, Char filename[FILENAME_LEN],
Char fn_name[FN_NAME_LEN], UInt*, SegInfo**);
-void CLG_(collectBlockInfo)(IRBB* bbIn, UInt*, UInt*, Bool*);
+void CLG_(collectBlockInfo)(IRSB* bbIn, UInt*, UInt*, Bool*);
void CLG_(set_instrument_state)(Char*,Bool);
void CLG_(dump_profile)(Char* trigger,Bool only_current_thread);
void CLG_(zero_all_cost)(Bool only_current_thread);
/* from bb.c */
void CLG_(init_bb_hash)(void);
bb_hash* CLG_(get_bb_hash)(void);
-BB* CLG_(get_bb)(Addr addr, IRBB* bb_in, Bool *seen_before);
+BB* CLG_(get_bb)(Addr addr, IRSB* bb_in, Bool *seen_before);
void CLG_(delete_bb)(Addr addr);
static __inline__ Addr bb_addr(BB* bb)
}
static
-EventSet* insert_simcall(IRBB* bbOut, InstrInfo* ii, UInt dataSize,
+EventSet* insert_simcall(IRSB* bbOut, InstrInfo* ii, UInt dataSize,
Bool instrIssued,
IRExpr* loadAddrExpr, IRExpr* storeAddrExpr)
{
di = unsafeIRDirty_0_N( argc, helperName,
VG_(fnptr_to_fnentry)( helperAddr ), argv);
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
return es;
}
* Fills the InstrInfo struct if not seen before
*/
static
-void endOfInstr(IRBB* bbOut, InstrInfo* ii, Bool bb_seen_before,
+void endOfInstr(IRSB* bbOut, InstrInfo* ii, Bool bb_seen_before,
UInt instr_offset, UInt instrLen, UInt dataSize,
UInt* cost_offset, Bool instrIssued,
IRExpr* loadAddrExpr, IRExpr* storeAddrExpr)
*
* Called from CLG_(get_bb)
*/
-void CLG_(collectBlockInfo)(IRBB* bbIn,
+void CLG_(collectBlockInfo)(IRSB* bbIn,
/*INOUT*/ UInt* instrs,
/*INOUT*/ UInt* cjmps,
/*INOUT*/ Bool* cjmp_inverted)
}
static
-void collectStatementInfo(IRTypeEnv* tyenv, IRBB* bbOut, IRStmt* st,
+void collectStatementInfo(IRTypeEnv* tyenv, IRSB* bbOut, IRStmt* st,
Addr* instrAddr, UInt* instrLen,
IRExpr** loadAddrExpr, IRExpr** storeAddrExpr,
UInt* dataSize, IRType hWordTy)
*instrLen = st->Ist.IMark.len;
break;
- case Ist_Tmp: {
- IRExpr* data = st->Ist.Tmp.data;
+ case Ist_WrTmp: {
+ IRExpr* data = st->Ist.WrTmp.data;
if (data->tag == Iex_Load) {
IRExpr* aexpr = data->Iex.Load.addr;
CLG_ASSERT( isIRAtom(aexpr) );
}
static
-void addConstMemStoreStmt( IRBB* bbOut, UWord addr, UInt val, IRType hWordTy)
+void addConstMemStoreStmt( IRSB* bbOut, UWord addr, UInt val, IRType hWordTy)
{
- addStmtToIRBB( bbOut,
+ addStmtToIRSB( bbOut,
IRStmt_Store(CLGEndness,
IRExpr_Const(hWordTy == Ity_I32 ?
IRConst_U32( addr ) :
}
static
-IRBB* CLG_(instrument)( VgCallbackClosure* closure,
- IRBB* bbIn,
+IRSB* CLG_(instrument)( VgCallbackClosure* closure,
+ IRSB* bbIn,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
{
Int i;
- IRBB* bbOut;
+ IRSB* bbOut;
IRStmt* st, *stnext;
Addr instrAddr, origAddr;
UInt instrLen = 0, dataSize;
CLG_DEBUG(3, "+ instrument(BB %p)\n", (Addr)closure->readdr);
- /* Set up BB for instrumented IR */
- bbOut = dopyIRBBExceptStmts(bbIn);
+ /* Set up SB for instrumented IR */
+ bbOut = deepCopyIRSBExceptStmts(bbIn);
// Copy verbatim any IR preamble preceding the first IMark
i = 0;
while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
- addStmtToIRBB( bbOut, bbIn->stmts[i] );
+ addStmtToIRSB( bbOut, bbIn->stmts[i] );
i++;
}
di = unsafeIRDirty_0_N( 1, "setup_bbcc",
VG_(fnptr_to_fnentry)( & CLG_(setup_bbcc) ),
argv);
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
instrCount = 0;
costOffset = 0;
cJumps++;
}
- addStmtToIRBB( bbOut, st );
+ addStmtToIRSB( bbOut, st );
st = stnext;
}
while (!beforeIBoundary);
// any reason at all: to free up space, because the guest code was
// unmapped or modified, or for any arbitrary reason.
static
-void clg_discard_basic_block_info ( Addr64 orig_addr64, VexGuestExtents vge )
+void clg_discard_superblock_info ( Addr64 orig_addr64, VexGuestExtents vge )
{
Addr orig_addr = (Addr)orig_addr64;
tl_assert(vge.n_used > 0);
if (0)
- VG_(printf)( "discard_basic_block_info: %p, %p, %llu\n",
+ VG_(printf)( "discard_superblock_info: %p, %p, %llu\n",
(void*)(Addr)orig_addr,
(void*)(Addr)vge.base[0], (ULong)vge.len[0]);
CLG_(instrument),
CLG_(fini));
- VG_(needs_basic_block_discards)(clg_discard_basic_block_info);
+ VG_(needs_superblock_discards)(clg_discard_superblock_info);
VG_(needs_command_line_options)(CLG_(process_cmd_line_option),
void VG_(basic_tool_funcs)(
void(*post_clo_init)(void),
- IRBB*(*instrument)(VgCallbackClosure*, IRBB*,
+ IRSB*(*instrument)(VgCallbackClosure*, IRSB*,
VexGuestLayout*, VexGuestExtents*, IRType, IRType),
void(*fini)(Int)
)
.core_errors = False,
.tool_errors = False,
.libc_freeres = False,
- .basic_block_discards = False,
+ .superblock_discards = False,
.command_line_options = False,
.client_requests = False,
.syscall_wrapper = False,
NEEDS(data_syms)
NEEDS(xml_output)
-void VG_(needs_basic_block_discards)(
+void VG_(needs_superblock_discards)(
void (*discard)(Addr64, VexGuestExtents)
)
{
- VG_(needs).basic_block_discards = True;
- VG_(tdict).tool_discard_basic_block_info = discard;
+ VG_(needs).superblock_discards = True;
+ VG_(tdict).tool_discard_superblock_info = discard;
}
void VG_(needs_tool_errors)(
we fall back to the case that handles an unknown SP change.
*/
static
-IRBB* vg_SP_update_pass ( void* closureV,
- IRBB* bb_in,
+IRSB* vg_SP_update_pass ( void* closureV,
+ IRSB* sb_in,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy,
IRType hWordTy )
{
- Int i, j, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
- IRDirty *dcall, *d;
- IRStmt* st;
- IRExpr* e;
- IRArray* descr;
- IRType typeof_SP;
- Long delta, con;
+ Int i, j, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
+ IRDirty *dcall, *d;
+ IRStmt* st;
+ IRExpr* e;
+ IRRegArray* descr;
+ IRType typeof_SP;
+ Long delta, con;
/* Set up BB */
- IRBB* bb = emptyIRBB();
- bb->tyenv = dopyIRTypeEnv(bb_in->tyenv);
- bb->next = dopyIRExpr(bb_in->next);
- bb->jumpkind = bb_in->jumpkind;
+ IRSB* bb = emptyIRSB();
+ bb->tyenv = deepCopyIRTypeEnv(sb_in->tyenv);
+ bb->next = deepCopyIRExpr(sb_in->next);
+ bb->jumpkind = sb_in->jumpkind;
delta = 0;
"track_" #kind "_mem_stack_" #syze, \
VG_(fnptr_to_fnentry)( \
VG_(tdict).track_##kind##_mem_stack_##syze ), \
- mkIRExprVec_1(IRExpr_Tmp(tmpp)) \
+ mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
); \
dcall->nFxState = 1; \
dcall->fxState[0].fx = Ifx_Read; \
dcall->fxState[0].offset = layout->offset_SP; \
dcall->fxState[0].size = layout->sizeof_SP; \
\
- addStmtToIRBB( bb, IRStmt_Dirty(dcall) ); \
+ addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
\
update_SP_aliases(-delta); \
\
clear_SP_aliases();
- for (i = 0; i < bb_in->stmts_used; i++) {
+ for (i = 0; i < sb_in->stmts_used; i++) {
- st = bb_in->stmts[i];
+ st = sb_in->stmts[i];
/* t = Get(sp): curr = t, delta = 0 */
- if (st->tag != Ist_Tmp) goto case2;
- e = st->Ist.Tmp.data;
+ if (st->tag != Ist_WrTmp) goto case2;
+ e = st->Ist.WrTmp.data;
if (e->tag != Iex_Get) goto case2;
if (e->Iex.Get.offset != offset_SP) goto case2;
if (e->Iex.Get.ty != typeof_SP) goto case2;
- add_SP_alias(st->Ist.Tmp.tmp, 0);
- addStmtToIRBB( bb, st );
+ add_SP_alias(st->Ist.WrTmp.tmp, 0);
+ addStmtToIRSB( bb, st );
continue;
case2:
/* t' = curr +/- const: curr = t', delta +=/-= const */
- if (st->tag != Ist_Tmp) goto case3;
- e = st->Ist.Tmp.data;
+ if (st->tag != Ist_WrTmp) goto case3;
+ e = st->Ist.WrTmp.data;
if (e->tag != Iex_Binop) goto case3;
- if (e->Iex.Binop.arg1->tag != Iex_Tmp) goto case3;
- if (!get_SP_delta(e->Iex.Binop.arg1->Iex.Tmp.tmp, &delta)) goto case3;
+ if (e->Iex.Binop.arg1->tag != Iex_RdTmp) goto case3;
+ if (!get_SP_delta(e->Iex.Binop.arg1->Iex.RdTmp.tmp, &delta)) goto case3;
if (e->Iex.Binop.arg2->tag != Iex_Const) goto case3;
if (!IS_ADD_OR_SUB(e->Iex.Binop.op)) goto case3;
con = GET_CONST(e->Iex.Binop.arg2->Iex.Const.con);
if (IS_ADD(e->Iex.Binop.op)) {
- add_SP_alias(st->Ist.Tmp.tmp, delta + con);
+ add_SP_alias(st->Ist.WrTmp.tmp, delta + con);
} else {
- add_SP_alias(st->Ist.Tmp.tmp, delta - con);
+ add_SP_alias(st->Ist.WrTmp.tmp, delta - con);
}
- addStmtToIRBB( bb, st );
+ addStmtToIRSB( bb, st );
continue;
case3:
/* t' = curr: curr = t' */
- if (st->tag != Ist_Tmp) goto case4;
- e = st->Ist.Tmp.data;
- if (e->tag != Iex_Tmp) goto case4;
- if (!get_SP_delta(e->Iex.Tmp.tmp, &delta)) goto case4;
- add_SP_alias(st->Ist.Tmp.tmp, delta);
- addStmtToIRBB( bb, st );
+ if (st->tag != Ist_WrTmp) goto case4;
+ e = st->Ist.WrTmp.data;
+ if (e->tag != Iex_RdTmp) goto case4;
+ if (!get_SP_delta(e->Iex.RdTmp.tmp, &delta)) goto case4;
+ add_SP_alias(st->Ist.WrTmp.tmp, delta);
+ addStmtToIRSB( bb, st );
continue;
case4:
/* Put(sp) = curr */
if (st->tag != Ist_Put) goto case5;
if (st->Ist.Put.offset != offset_SP) goto case5;
- if (st->Ist.Put.data->tag != Iex_Tmp) goto case5;
- if (get_SP_delta(st->Ist.Put.data->Iex.Tmp.tmp, &delta)) {
- IRTemp tttmp = st->Ist.Put.data->Iex.Tmp.tmp;
+ if (st->Ist.Put.data->tag != Iex_RdTmp) goto case5;
+ if (get_SP_delta(st->Ist.Put.data->Iex.RdTmp.tmp, &delta)) {
+ IRTemp tttmp = st->Ist.Put.data->Iex.RdTmp.tmp;
switch (delta) {
- case 0: addStmtToIRBB(bb,st); continue;
- case 4: DO(die, 4, tttmp); addStmtToIRBB(bb,st); continue;
- case -4: DO(new, 4, tttmp); addStmtToIRBB(bb,st); continue;
- case 8: DO(die, 8, tttmp); addStmtToIRBB(bb,st); continue;
- case -8: DO(new, 8, tttmp); addStmtToIRBB(bb,st); continue;
- case 12: DO(die, 12, tttmp); addStmtToIRBB(bb,st); continue;
- case -12: DO(new, 12, tttmp); addStmtToIRBB(bb,st); continue;
- case 16: DO(die, 16, tttmp); addStmtToIRBB(bb,st); continue;
- case -16: DO(new, 16, tttmp); addStmtToIRBB(bb,st); continue;
- case 32: DO(die, 32, tttmp); addStmtToIRBB(bb,st); continue;
- case -32: DO(new, 32, tttmp); addStmtToIRBB(bb,st); continue;
- case 112: DO(die, 112, tttmp); addStmtToIRBB(bb,st); continue;
- case -112: DO(new, 112, tttmp); addStmtToIRBB(bb,st); continue;
- case 128: DO(die, 128, tttmp); addStmtToIRBB(bb,st); continue;
- case -128: DO(new, 128, tttmp); addStmtToIRBB(bb,st); continue;
- case 144: DO(die, 144, tttmp); addStmtToIRBB(bb,st); continue;
- case -144: DO(new, 144, tttmp); addStmtToIRBB(bb,st); continue;
- case 160: DO(die, 160, tttmp); addStmtToIRBB(bb,st); continue;
- case -160: DO(new, 160, tttmp); addStmtToIRBB(bb,st); continue;
+ case 0: addStmtToIRSB(bb,st); continue;
+ case 4: DO(die, 4, tttmp); addStmtToIRSB(bb,st); continue;
+ case -4: DO(new, 4, tttmp); addStmtToIRSB(bb,st); continue;
+ case 8: DO(die, 8, tttmp); addStmtToIRSB(bb,st); continue;
+ case -8: DO(new, 8, tttmp); addStmtToIRSB(bb,st); continue;
+ case 12: DO(die, 12, tttmp); addStmtToIRSB(bb,st); continue;
+ case -12: DO(new, 12, tttmp); addStmtToIRSB(bb,st); continue;
+ case 16: DO(die, 16, tttmp); addStmtToIRSB(bb,st); continue;
+ case -16: DO(new, 16, tttmp); addStmtToIRSB(bb,st); continue;
+ case 32: DO(die, 32, tttmp); addStmtToIRSB(bb,st); continue;
+ case -32: DO(new, 32, tttmp); addStmtToIRSB(bb,st); continue;
+ case 112: DO(die, 112, tttmp); addStmtToIRSB(bb,st); continue;
+ case -112: DO(new, 112, tttmp); addStmtToIRSB(bb,st); continue;
+ case 128: DO(die, 128, tttmp); addStmtToIRSB(bb,st); continue;
+ case -128: DO(new, 128, tttmp); addStmtToIRSB(bb,st); continue;
+ case 144: DO(die, 144, tttmp); addStmtToIRSB(bb,st); continue;
+ case -144: DO(new, 144, tttmp); addStmtToIRSB(bb,st); continue;
+ case 160: DO(die, 160, tttmp); addStmtToIRSB(bb,st); continue;
+ case -160: DO(new, 160, tttmp); addStmtToIRSB(bb,st); continue;
default:
/* common values for ppc64: 144 128 160 112 176 */
n_SP_updates_generic_known++;
generic:
/* Pass both the old and new SP values to this helper. */
old_SP = newIRTemp(bb->tyenv, typeof_SP);
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
- IRStmt_Tmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
+ IRStmt_WrTmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
);
dcall = unsafeIRDirty_0_N(
2/*regparms*/,
"VG_(unknown_SP_update)",
VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
- mkIRExprVec_2( IRExpr_Tmp(old_SP), st->Ist.Put.data )
+ mkIRExprVec_2( IRExpr_RdTmp(old_SP), st->Ist.Put.data )
);
- addStmtToIRBB( bb, IRStmt_Dirty(dcall) );
+ addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
- addStmtToIRBB( bb, st );
+ addStmtToIRSB( bb, st );
clear_SP_aliases();
- add_SP_alias(st->Ist.Put.data->Iex.Tmp.tmp, 0);
+ add_SP_alias(st->Ist.Put.data->Iex.RdTmp.tmp, 0);
continue;
}
}
/* well, not interesting. Just copy and keep going. */
- addStmtToIRBB( bb, st );
+ addStmtToIRSB( bb, st );
- } /* for (i = 0; i < bb_in->stmts_used; i++) */
+ } /* for (i = 0; i < sb_in->stmts_used; i++) */
return bb;
redir stack, checking for stack overflow and generating code to
bomb out if so. */
-static void gen_PUSH ( IRBB* bb, IRExpr* e )
+static void gen_PUSH ( IRSB* bb, IRExpr* e )
{
- IRArray* descr;
- IRTemp t1;
- IRExpr* one;
+ IRRegArray* descr;
+ IRTemp t1;
+ IRExpr* one;
# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
Int stack_size = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
vg_assert(sizeof(Word) == VG_WORDSIZE);
vg_assert(sizeof(Addr) == VG_WORDSIZE);
- descr = mkIRArray( offB_REDIR_STACK, ty_Word, stack_size );
+ descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
t1 = newIRTemp( bb->tyenv, ty_Word );
one = mkU(1);
vg_assert(typeOfIRExpr(bb->tyenv, e) == ty_Word);
/* t1 = guest_REDIR_SP + 1 */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
- IRStmt_Tmp(
+ IRStmt_WrTmp(
t1,
IRExpr_Binop(op_Add, IRExpr_Get( offB_REDIR_SP, ty_Word ), one)
)
this is an unrecoverable error and will lead to Valgrind
shutting down. _EMWARN is set regardless - that's harmless
since is only has a meaning if the exit is taken. */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_overflow))
);
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Exit(
IRExpr_Binop(
op_CmpNE,
IRExpr_Binop(
op_Sar,
- IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_Tmp(t1)),
+ IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_RdTmp(t1)),
mkU8(8 * VG_WORDSIZE - 1)
),
mkU(0)
);
/* guest_REDIR_SP = t1 */
- addStmtToIRBB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_Tmp(t1)));
+ addStmtToIRSB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_RdTmp(t1)));
/* guest_REDIR_STACK[t1+0] = e */
/* PutI/GetI have I32-typed indexes regardless of guest word size */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
- IRStmt_PutI(descr, narrowTo32(bb->tyenv,IRExpr_Tmp(t1)), 0, e)
+ IRStmt_PutI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0, e)
);
}
stack, binding it to a new temporary, which is returned. As with
gen_PUSH, an overflow check is also performed. */
-static IRTemp gen_POP ( IRBB* bb )
+static IRTemp gen_POP ( IRSB* bb )
{
# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
Int stack_size = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
IRExpr*(*mkU)(UInt) = mkU32;
# endif
- IRArray* descr = mkIRArray( offB_REDIR_STACK, ty_Word, stack_size );
- IRTemp t1 = newIRTemp( bb->tyenv, ty_Word );
- IRTemp res = newIRTemp( bb->tyenv, ty_Word );
- IRExpr* one = mkU(1);
+ IRRegArray* descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
+ IRTemp t1 = newIRTemp( bb->tyenv, ty_Word );
+ IRTemp res = newIRTemp( bb->tyenv, ty_Word );
+ IRExpr* one = mkU(1);
vg_assert(sizeof(void*) == VG_WORDSIZE);
vg_assert(sizeof(Word) == VG_WORDSIZE);
vg_assert(sizeof(Addr) == VG_WORDSIZE);
/* t1 = guest_REDIR_SP */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
- IRStmt_Tmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
+ IRStmt_WrTmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
);
/* Bomb out if t1 < 0. Same comments as gen_PUSH apply. */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_underflow))
);
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Exit(
IRExpr_Binop(
op_CmpNE,
IRExpr_Binop(
op_Sar,
- IRExpr_Tmp(t1),
+ IRExpr_RdTmp(t1),
mkU8(8 * VG_WORDSIZE - 1)
),
mkU(0)
/* res = guest_REDIR_STACK[t1+0] */
/* PutI/GetI have I32-typed indexes regardless of guest word size */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
- IRStmt_Tmp(
+ IRStmt_WrTmp(
res,
- IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_Tmp(t1)), 0)
+ IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0)
)
);
/* guest_REDIR_SP = t1-1 */
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
- IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_Tmp(t1), one))
+ IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_RdTmp(t1), one))
);
return res;
intercept the return and restore R2 and L2 to the values saved
here. */
-static void gen_push_and_set_LR_R2 ( IRBB* bb, Addr64 new_R2_value )
+static void gen_push_and_set_LR_R2 ( IRSB* bb, Addr64 new_R2_value )
{
# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
Addr64 bogus_RA = (Addr64)&VG_(ppctoc_magic_redirect_return_stub);
Int offB_LR = offsetof(VexGuestPPC64State,guest_LR);
gen_PUSH( bb, IRExpr_Get(offB_LR, Ity_I64) );
gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
- addStmtToIRBB( bb, IRStmt_Put( offB_LR, mkU64( bogus_RA )) );
- addStmtToIRBB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_LR, mkU64( bogus_RA )) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );
# elif defined(VGP_ppc32_aix5)
Addr32 bogus_RA = (Addr32)&VG_(ppctoc_magic_redirect_return_stub);
Int offB_LR = offsetof(VexGuestPPC32State,guest_LR);
gen_PUSH( bb, IRExpr_Get(offB_LR, Ity_I32) );
gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I32) );
- addStmtToIRBB( bb, IRStmt_Put( offB_LR, mkU32( bogus_RA )) );
- addStmtToIRBB( bb, IRStmt_Put( offB_GPR2, mkU32( new_R2_value )) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_LR, mkU32( bogus_RA )) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU32( new_R2_value )) );
# else
# error Platform is not TOC-afflicted, fortunately
# endif
}
-static void gen_pop_R2_LR_then_bLR ( IRBB* bb )
+static void gen_pop_R2_LR_then_bLR ( IRSB* bb )
{
# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
Int offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
IRTemp old_LR = newIRTemp( bb->tyenv, Ity_I64 );
/* Restore R2 */
old_R2 = gen_POP( bb );
- addStmtToIRBB( bb, IRStmt_Put( offB_GPR2, IRExpr_Tmp(old_R2)) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
/* Restore LR */
old_LR = gen_POP( bb );
- addStmtToIRBB( bb, IRStmt_Put( offB_LR, IRExpr_Tmp(old_LR)) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
/* Branch to LR */
/* re boring, we arrived here precisely because a wrapped fn did a
blr (hence Ijk_Ret); so we should just mark this jump as Boring,
else one _Call will have resulted in two _Rets. */
bb->jumpkind = Ijk_Boring;
- bb->next = IRExpr_Binop(Iop_And64, IRExpr_Tmp(old_LR), mkU64(~(3ULL)));
+ bb->next = IRExpr_Binop(Iop_And64, IRExpr_RdTmp(old_LR), mkU64(~(3ULL)));
# elif defined(VGP_ppc32_aix5)
Int offB_GPR2 = offsetof(VexGuestPPC32State,guest_GPR2);
IRTemp old_LR = newIRTemp( bb->tyenv, Ity_I32 );
/* Restore R2 */
old_R2 = gen_POP( bb );
- addStmtToIRBB( bb, IRStmt_Put( offB_GPR2, IRExpr_Tmp(old_R2)) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
/* Restore LR */
old_LR = gen_POP( bb );
- addStmtToIRBB( bb, IRStmt_Put( offB_LR, IRExpr_Tmp(old_LR)) );
+ addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
/* Branch to LR */
/* re boring, we arrived here precisely because a wrapped fn did a
blr (hence Ijk_Ret); so we should just mark this jump as Boring,
else one _Call will have resulted in two _Rets. */
bb->jumpkind = Ijk_Boring;
- bb->next = IRExpr_Binop(Iop_And32, IRExpr_Tmp(old_LR), mkU32(~3));
+ bb->next = IRExpr_Binop(Iop_And32, IRExpr_RdTmp(old_LR), mkU32(~3));
# else
# error Platform is not TOC-afflicted, fortunately
}
static
-Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRBB* bb )
+Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRSB* bb )
{
VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
- /* Since we're creating the entire IRBB right here, give it a
+ /* Since we're creating the entire IRSB right here, give it a
proper IMark, as it won't get one any other way, and cachegrind
will barf if it doesn't have one (fair enough really). */
- addStmtToIRBB( bb, IRStmt_IMark( closure->readdr, 4 ) );
+ addStmtToIRSB( bb, IRStmt_IMark( closure->readdr, 4 ) );
/* Generate the magic sequence:
pop R2 from hidden stack
pop LR from hidden stack
return stub address, and that in that case it can get the real LR
value from the hidden stack instead. */
static
-Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRBB* bb )
+Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRSB* bb )
{
Int nraddr_szB
= sizeof(((VexGuestArchState*)0)->guest_NRADDR);
vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
vg_assert(nraddr_szB == VG_WORDSIZE);
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Put(
offsetof(VexGuestArchState,guest_NRADDR),
);
# if defined(VG_PLAT_USES_PPCTOC)
{ VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Put(
offsetof(VexGuestArchState,guest_NRADDR_GPR2),
can read _NRADDR and find the address of the function being
wrapped. On toc-afflicted platforms we must also snarf r2. */
static
-Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRBB* bb )
+Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRSB* bb )
{
VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
Int nraddr_szB
= sizeof(((VexGuestArchState*)0)->guest_NRADDR);
vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
vg_assert(nraddr_szB == VG_WORDSIZE);
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Put(
offsetof(VexGuestArchState,guest_NRADDR),
);
# if defined(VGP_ppc64_linux) || defined(VGP_ppc32_aix5) \
|| defined(VGP_ppc64_aix5)
- addStmtToIRBB(
+ addStmtToIRSB(
bb,
IRStmt_Put(
offsetof(VexGuestArchState,guest_NRADDR_GPR2),
Int tmpbuf_used, verbosity, i;
Bool notrace_until_done, do_self_check;
UInt notrace_until_limit = 0;
- Bool (*preamble_fn)(void*,IRBB*);
+ Bool (*preamble_fn)(void*,IRSB*);
VexArch vex_arch;
VexArchInfo vex_archinfo;
- VexMiscInfo vex_miscinfo;
+ VexAbiInfo vex_abiinfo;
VexGuestExtents vge;
VexTranslateArgs vta;
VexTranslateResult tres;
/* Get the CPU info established at startup. */
VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
- /* Set up 'misc info' structure with stuff Vex needs to know about
+ /* Set up 'abiinfo' structure with stuff Vex needs to know about
the guest and host ABIs. */
- LibVEX_default_VexMiscInfo( &vex_miscinfo );
- vex_miscinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;
+ LibVEX_default_VexAbiInfo( &vex_abiinfo );
+ vex_abiinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;
# if defined(VGP_ppc32_linux)
- vex_miscinfo.guest_ppc_zap_RZ_at_blr = False;
- vex_miscinfo.guest_ppc_zap_RZ_at_bl = NULL;
- vex_miscinfo.host_ppc32_regalign_int64_args = True;
+ vex_abiinfo.guest_ppc_zap_RZ_at_blr = False;
+ vex_abiinfo.guest_ppc_zap_RZ_at_bl = NULL;
+ vex_abiinfo.host_ppc32_regalign_int64_args = True;
# endif
# if defined(VGP_ppc64_linux)
- vex_miscinfo.guest_ppc_zap_RZ_at_blr = True;
- vex_miscinfo.guest_ppc_zap_RZ_at_bl = const_True;
- vex_miscinfo.host_ppc_calls_use_fndescrs = True;
+ vex_abiinfo.guest_ppc_zap_RZ_at_blr = True;
+ vex_abiinfo.guest_ppc_zap_RZ_at_bl = const_True;
+ vex_abiinfo.host_ppc_calls_use_fndescrs = True;
# endif
# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
- vex_miscinfo.guest_ppc_zap_RZ_at_blr = False;
- vex_miscinfo.guest_ppc_zap_RZ_at_bl = bl_RZ_zap_ok_for_AIX;
- vex_miscinfo.guest_ppc_sc_continues_at_LR = True;
- vex_miscinfo.host_ppc_calls_use_fndescrs = True;
+ vex_abiinfo.guest_ppc_zap_RZ_at_blr = False;
+ vex_abiinfo.guest_ppc_zap_RZ_at_bl = bl_RZ_zap_ok_for_AIX;
+ vex_abiinfo.guest_ppc_sc_continues_at_LR = True;
+ vex_abiinfo.host_ppc_calls_use_fndescrs = True;
# endif
/* Set up closure args. */
vta.archinfo_guest = vex_archinfo;
vta.arch_host = vex_arch;
vta.archinfo_host = vex_archinfo;
- vta.miscinfo_both = vex_miscinfo;
+ vta.abiinfo_both = vex_abiinfo;
vta.guest_bytes = (UChar*)ULong_to_Ptr(addr);
vta.guest_bytes_addr = (Addr64)addr;
vta.callback_opaque = (void*)&closure;
VgCallbackClosure*. Hence the following longwinded casts.
They are entirely legal but longwinded so as to maximise the
chance of the C typechecker picking up any type snafus. */
- IRBB*(*f)(VgCallbackClosure*,
- IRBB*,VexGuestLayout*,VexGuestExtents*,
+ IRSB*(*f)(VgCallbackClosure*,
+ IRSB*,VexGuestLayout*,VexGuestExtents*,
IRType,IRType)
= VG_(tdict).tool_instrument;
- IRBB*(*g)(void*,
- IRBB*,VexGuestLayout*,VexGuestExtents*,
+ IRSB*(*g)(void*,
+ IRSB*,VexGuestLayout*,VexGuestExtents*,
IRType,IRType)
- = (IRBB*(*)(void*,IRBB*,VexGuestLayout*,VexGuestExtents*,IRType,IRType))f;
+ = (IRSB*(*)(void*,IRSB*,VexGuestLayout*,VexGuestExtents*,IRType,IRType))f;
vta.instrument1 = g;
}
/* No need for type kludgery here. */
vg_assert(sec->tt[i].n_tte2ec <= 3);
n_dump_osize += vge_osize(&sec->tt[i].vge);
/* Tell the tool too. */
- if (VG_(needs).basic_block_discards) {
- VG_TDICT_CALL( tool_discard_basic_block_info,
+ if (VG_(needs).superblock_discards) {
+ VG_TDICT_CALL( tool_discard_superblock_info,
sec->tt[i].entry,
sec->tt[i].vge );
}
n_disc_osize += vge_osize(&tte->vge);
/* Tell the tool too. */
- if (VG_(needs).basic_block_discards) {
- VG_TDICT_CALL( tool_discard_basic_block_info,
+ if (VG_(needs).superblock_discards) {
+ VG_TDICT_CALL( tool_discard_superblock_info,
tte->entry,
tte->vge );
}
Bool libc_freeres;
Bool core_errors;
Bool tool_errors;
- Bool basic_block_discards;
+ Bool superblock_discards;
Bool command_line_options;
Bool client_requests;
Bool syscall_wrapper;
// Basic functions
void (*tool_pre_clo_init) (void);
void (*tool_post_clo_init)(void);
- IRBB* (*tool_instrument) (VgCallbackClosure*,
- IRBB*,
+ IRSB* (*tool_instrument) (VgCallbackClosure*,
+ IRSB*,
VexGuestLayout*, VexGuestExtents*,
IRType, IRType);
void (*tool_fini) (Int);
Char* (*tool_get_error_name) (Error*);
void (*tool_print_extra_suppression_info)(Error*);
- // VG_(needs).basic_block_discards
- void (*tool_discard_basic_block_info)(Addr64, VexGuestExtents);
+ // VG_(needs).superblock_discards
+ void (*tool_discard_superblock_info)(Addr64, VexGuestExtents);
// VG_(needs).command_line_options
Bool (*tool_process_cmd_line_option)(Char*);
}
#endif
static
-IRBB* hg_instrument ( VgCallbackClosure* closure,
- IRBB* bb,
+IRSB* hg_instrument ( VgCallbackClosure* closure,
+ IRSB* bb,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
// with code addresses it will get into deep trouble if it does
// make this assumption.
//
- // IRBB* bb_in is the incoming bb to be instrumented, in flat IR
- // form.
+ // IRSB* sb_in is the incoming superblock to be instrumented,
+ // in flat IR form.
//
// VexGuestLayout* layout contains limited info on the layout of
// the guest state: where the stack pointer and program counter
comment in MC_(instrument) in memcheck/mc_translate.c for
details.
*/
- IRBB*(*instrument)(VgCallbackClosure* closure,
- IRBB* bb_in,
+ IRSB*(*instrument)(VgCallbackClosure* closure,
+ IRSB* sb_in,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy,
.so unloading, or otherwise at the discretion of m_transtab, eg
when the table becomes too full) to avoid stale information being
reused for new translations. */
-extern void VG_(needs_basic_block_discards) (
+extern void VG_(needs_superblock_discards) (
// Discard any information that pertains to specific translations
// or instructions within the address range given. There are two
// possible approaches.
// translation, and so could be covered by the "extents" of more than
// one call to this function.
// Doing it the first way (as eg. Cachegrind does) is probably easier.
- void (*discard_basic_block_info)(Addr64 orig_addr, VexGuestExtents extents)
+ void (*discard_superblock_info)(Addr64 orig_addr, VexGuestExtents extents)
);
/* Tool defines its own command line options? */
}
/* A helper that adds the instrumentation for a detail. */
-static void instrument_detail(IRBB* bb, Op op, IRType type)
+static void instrument_detail(IRSB* bb, Op op, IRType type)
{
IRDirty* di;
IRExpr** argv;
di = unsafeIRDirty_0_N( 1, "increment_detail",
VG_(fnptr_to_fnentry)( &increment_detail ),
argv);
- addStmtToIRBB( bb, IRStmt_Dirty(di) );
+ addStmtToIRSB( bb, IRStmt_Dirty(di) );
}
/* Summarize and print the details. */
}
-static void flushEvents(IRBB* bb)
+static void flushEvents(IRSB* bb)
{
Int i;
Char* helperName;
di = unsafeIRDirty_0_N( /*regparms*/2,
helperName, VG_(fnptr_to_fnentry)( helperAddr ),
argv );
- addStmtToIRBB( bb, IRStmt_Dirty(di) );
+ addStmtToIRSB( bb, IRStmt_Dirty(di) );
}
events_used = 0;
// must still call this function, addEvent_Ir() -- it is necessary to add
// the Ir events to the events list so that merging of paired load/store
// events into modify events works correctly.
-static void addEvent_Ir ( IRBB* bb, IRAtom* iaddr, UInt isize )
+static void addEvent_Ir ( IRSB* bb, IRAtom* iaddr, UInt isize )
{
Event* evt;
tl_assert( (VG_MIN_INSTR_SZB <= isize && isize <= VG_MAX_INSTR_SZB)
}
static
-void addEvent_Dr ( IRBB* bb, IRAtom* daddr, Int dsize )
+void addEvent_Dr ( IRSB* bb, IRAtom* daddr, Int dsize )
{
Event* evt;
tl_assert(isIRAtom(daddr));
}
static
-void addEvent_Dw ( IRBB* bb, IRAtom* daddr, Int dsize )
+void addEvent_Dw ( IRSB* bb, IRAtom* daddr, Int dsize )
{
Event* lastEvt;
Event* evt;
}
static
-IRBB* lk_instrument ( VgCallbackClosure* closure,
- IRBB* bbIn,
+IRSB* lk_instrument ( VgCallbackClosure* closure,
+ IRSB* bbIn,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
{
IRDirty* di;
Int i;
- IRBB* bbOut;
+ IRSB* bbOut;
Char fnname[100];
IRType type;
IRTypeEnv* tyenv = bbIn->tyenv;
}
/* Set up BB */
- bbOut = dopyIRBBExceptStmts(bbIn);
+ bbOut = deepCopyIRSBExceptStmts(bbIn);
// Copy verbatim any IR preamble preceding the first IMark
i = 0;
while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
- addStmtToIRBB( bbOut, bbIn->stmts[i] );
+ addStmtToIRSB( bbOut, bbIn->stmts[i] );
i++;
}
di = unsafeIRDirty_0_N( 0, "add_one_BB_entered",
VG_(fnptr_to_fnentry)( &add_one_BB_entered ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
if (clo_trace_mem) {
di = unsafeIRDirty_0_N( 0, "add_one_IRStmt",
VG_(fnptr_to_fnentry)( &add_one_IRStmt ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
switch (st->tag) {
case Ist_Put:
case Ist_PutI:
case Ist_MFence:
- addStmtToIRBB( bbOut, st );
+ addStmtToIRSB( bbOut, st );
break;
case Ist_IMark:
di = unsafeIRDirty_0_N( 0, "add_one_guest_instr",
VG_(fnptr_to_fnentry)( &add_one_guest_instr ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
/* An unconditional branch to a known destination in the
- * guest's instructions can be represented, in the IRBB to
+ * guest's instructions can be represented, in the IRSB to
* instrument, by the VEX statements that are the
* translation of that known destination. This feature is
* called 'BB chasing' and can be influenced by command
0, "add_one_func_call",
VG_(fnptr_to_fnentry)( &add_one_func_call ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
}
if (clo_trace_mem) {
addEvent_Ir( bbOut, mkIRExpr_HWord( (HWord)st->Ist.IMark.addr ),
st->Ist.IMark.len );
}
- addStmtToIRBB( bbOut, st );
+ addStmtToIRSB( bbOut, st );
break;
- case Ist_Tmp:
+ case Ist_WrTmp:
// Add a call to trace_load() if --trace-mem=yes.
if (clo_trace_mem) {
- IRExpr* data = st->Ist.Tmp.data;
+ IRExpr* data = st->Ist.WrTmp.data;
if (data->tag == Iex_Load) {
addEvent_Dr( bbOut, data->Iex.Load.addr,
sizeofIRType(data->Iex.Load.ty) );
}
}
if (clo_detailed_counts) {
- IRExpr* expr = st->Ist.Tmp.data;
+ IRExpr* expr = st->Ist.WrTmp.data;
type = typeOfIRExpr(bbOut->tyenv, expr);
tl_assert(type != Ity_INVALID);
switch (expr->tag) {
break;
}
}
- addStmtToIRBB( bbOut, st );
+ addStmtToIRSB( bbOut, st );
break;
case Ist_Store:
tl_assert(type != Ity_INVALID);
instrument_detail( bbOut, OpStore, type );
}
- addStmtToIRBB( bbOut, st );
+ addStmtToIRSB( bbOut, st );
break;
case Ist_Dirty: {
tl_assert(d->mAddr == NULL);
tl_assert(d->mSize == 0);
}
- addStmtToIRBB( bbOut, st );
+ addStmtToIRSB( bbOut, st );
break;
}
di = unsafeIRDirty_0_N( 0, "add_one_Jcc",
VG_(fnptr_to_fnentry)( &add_one_Jcc ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
if (clo_trace_mem) {
flushEvents(bbOut);
}
- addStmtToIRBB( bbOut, st ); // Original statement
+ addStmtToIRSB( bbOut, st ); // Original statement
if (clo_basic_counts) {
/* Count non-taken Jcc */
VG_(fnptr_to_fnentry)(
&add_one_Jcc_untaken ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
break;
di = unsafeIRDirty_0_N( 0, "add_one_BB_completed",
VG_(fnptr_to_fnentry)( &add_one_BB_completed ),
mkIRExprVec_0() );
- addStmtToIRBB( bbOut, IRStmt_Dirty(di) );
+ addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
if (clo_trace_mem) {
/*------------------------------------------------------------*/
static
-IRBB* ms_instrument ( VgCallbackClosure* closure,
- IRBB* bb_in,
+IRSB* ms_instrument ( VgCallbackClosure* closure,
+ IRSB* bb_in,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
/* Functions defined in mc_translate.c */
extern
-IRBB* MC_(instrument) ( VgCallbackClosure* closure,
- IRBB* bb_in,
+IRSB* MC_(instrument) ( VgCallbackClosure* closure,
+ IRSB* bb_in,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy );
/* Carries around state during memcheck instrumentation. */
typedef
struct _MCEnv {
- /* MODIFIED: the bb being constructed. IRStmts are added. */
- IRBB* bb;
+ /* MODIFIED: the superblock being constructed. IRStmts are
+ added. */
+ IRSB* bb;
/* MODIFIED: a table [0 .. #temps_in_original_bb-1] which maps
original temps to their current their current shadow temp.
{
if (a1->tag == Iex_Const)
return True;
- if (a1->tag == Iex_Tmp && a1->Iex.Tmp.tmp < mce->n_originalTmps)
+ if (a1->tag == Iex_RdTmp && a1->Iex.RdTmp.tmp < mce->n_originalTmps)
return True;
return False;
}
{
if (a1->tag == Iex_Const)
return True;
- if (a1->tag == Iex_Tmp && a1->Iex.Tmp.tmp >= mce->n_originalTmps)
+ if (a1->tag == Iex_RdTmp && a1->Iex.RdTmp.tmp >= mce->n_originalTmps)
return True;
return False;
}
are identically-kinded. */
static Bool sameKindedAtoms ( IRAtom* a1, IRAtom* a2 )
{
- if (a1->tag == Iex_Tmp && a2->tag == Iex_Tmp)
+ if (a1->tag == Iex_RdTmp && a2->tag == Iex_RdTmp)
return True;
if (a1->tag == Iex_Const && a2->tag == Iex_Const)
return True;
/* assign value to tmp */
#define assign(_bb,_tmp,_expr) \
- addStmtToIRBB((_bb), IRStmt_Tmp((_tmp),(_expr)))
+ addStmtToIRSB((_bb), IRStmt_WrTmp((_tmp),(_expr)))
/* add stmt to a bb */
#define stmt(_bb,_stmt) \
- addStmtToIRBB((_bb), (_stmt))
+ addStmtToIRSB((_bb), (_stmt))
/* build various kinds of expressions */
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
#define mkV128(_n) IRExpr_Const(IRConst_V128(_n))
-#define mkexpr(_tmp) IRExpr_Tmp((_tmp))
+#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
/* bind the given expression to a new temporary, and return the
temporary. This effectively converts an arbitrary expression into
getting a new value. */
tl_assert(isIRAtom(vatom));
/* sameKindedAtoms ... */
- if (vatom->tag == Iex_Tmp) {
- tl_assert(atom->tag == Iex_Tmp);
- newShadowTmp(mce, atom->Iex.Tmp.tmp);
- assign(mce->bb, findShadowTmp(mce, atom->Iex.Tmp.tmp),
+ if (vatom->tag == Iex_RdTmp) {
+ tl_assert(atom->tag == Iex_RdTmp);
+ newShadowTmp(mce, atom->Iex.RdTmp.tmp);
+ assign(mce->bb, findShadowTmp(mce, atom->Iex.RdTmp.tmp),
definedOfType(ty));
}
}
*/
static
void do_shadow_PUTI ( MCEnv* mce,
- IRArray* descr, IRAtom* ix, Int bias, IRAtom* atom )
+ IRRegArray* descr,
+ IRAtom* ix, Int bias, IRAtom* atom )
{
IRAtom* vatom;
IRType ty, tyS;
} else {
/* Do a cloned version of the Put that refers to the shadow
area. */
- IRArray* new_descr
- = mkIRArray( descr->base + mce->layout->total_sizeB,
- tyS, descr->nElems);
+ IRRegArray* new_descr
+ = mkIRRegArray( descr->base + mce->layout->total_sizeB,
+ tyS, descr->nElems);
stmt( mce->bb, IRStmt_PutI( new_descr, ix, bias, vatom ));
}
}
given GETI (passed in in pieces).
*/
static
-IRExpr* shadow_GETI ( MCEnv* mce, IRArray* descr, IRAtom* ix, Int bias )
+IRExpr* shadow_GETI ( MCEnv* mce,
+ IRRegArray* descr, IRAtom* ix, Int bias )
{
IRType ty = descr->elemTy;
IRType tyS = shadowType(ty);
} else {
/* return a cloned version of the Get that refers to the shadow
area. */
- IRArray* new_descr
- = mkIRArray( descr->base + mce->layout->total_sizeB,
- tyS, descr->nElems);
+ IRRegArray* new_descr
+ = mkIRRegArray( descr->base + mce->layout->total_sizeB,
+ tyS, descr->nElems);
return IRExpr_GetI( new_descr, ix, bias );
}
}
return shadow_GETI( mce, e->Iex.GetI.descr,
e->Iex.GetI.ix, e->Iex.GetI.bias );
- case Iex_Tmp:
- return IRExpr_Tmp( findShadowTmp(mce, e->Iex.Tmp.tmp) );
+ case Iex_RdTmp:
+ return IRExpr_RdTmp( findShadowTmp(mce, e->Iex.RdTmp.tmp) );
case Iex_Const:
return definedOfType(shadowType(typeOfIRExpr(mce->bb->tyenv, e)));
ULong n = 0;
IRConst* con;
tl_assert(isIRAtom(at));
- if (at->tag == Iex_Tmp)
+ if (at->tag == Iex_RdTmp)
return False;
tl_assert(at->tag == Iex_Const);
con = at->Iex.Const.con;
IRExpr* e;
IRDirty* d;
switch (st->tag) {
- case Ist_Tmp:
- e = st->Ist.Tmp.data;
+ case Ist_WrTmp:
+ e = st->Ist.WrTmp.data;
switch (e->tag) {
case Iex_Get:
- case Iex_Tmp:
+ case Iex_RdTmp:
return False;
case Iex_Const:
return isBogusAtom(e);
}
-IRBB* MC_(instrument) ( VgCallbackClosure* closure,
- IRBB* bb_in,
+IRSB* MC_(instrument) ( VgCallbackClosure* closure,
+ IRSB* bb_in,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
Int i, j, first_stmt;
IRStmt* st;
MCEnv mce;
- IRBB* bb;
+ IRSB* bb;
if (gWordTy != hWordTy) {
/* We don't currently support this case. */
tl_assert(sizeof(UInt) == 4);
tl_assert(sizeof(Int) == 4);
- /* Set up BB */
- bb = dopyIRBBExceptStmts(bb_in);
+ /* Set up SB */
+ bb = deepCopyIRSBExceptStmts(bb_in);
/* Set up the running environment. Only .bb is modified as we go
along. */
tl_assert(st);
tl_assert(isFlatIRStmt(st));
- addStmtToIRBB( bb, bb_in->stmts[i] );
+ addStmtToIRSB( bb, bb_in->stmts[i] );
i++;
}
'tmp = CONSTANT'.
*/
for (j = 0; j < i; j++) {
- if (bb_in->stmts[j]->tag == Ist_Tmp) {
+ if (bb_in->stmts[j]->tag == Ist_WrTmp) {
/* findShadowTmp checks its arg is an original tmp;
no need to assert that here. */
- IRTemp tmp_o = bb_in->stmts[j]->Ist.Tmp.tmp;
+ IRTemp tmp_o = bb_in->stmts[j]->Ist.WrTmp.tmp;
IRTemp tmp_s = findShadowTmp(&mce, tmp_o);
IRType ty_s = typeOfIRTemp(bb->tyenv, tmp_s);
assign( bb, tmp_s, definedOfType( ty_s ) );
switch (st->tag) {
- case Ist_Tmp:
- assign( bb, findShadowTmp(&mce, st->Ist.Tmp.tmp),
- expr2vbits( &mce, st->Ist.Tmp.data) );
+ case Ist_WrTmp:
+ assign( bb, findShadowTmp(&mce, st->Ist.WrTmp.tmp),
+ expr2vbits( &mce, st->Ist.WrTmp.data) );
break;
case Ist_Put:
}
/* ... and finally copy the stmt itself to the output. */
- addStmtToIRBB(bb, st);
+ addStmtToIRSB(bb, st);
}
}
static
-IRBB* nl_instrument ( VgCallbackClosure* closure,
- IRBB* bb,
+IRSB* nl_instrument ( VgCallbackClosure* closure,
+ IRSB* bb,
VexGuestLayout* layout,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )