0/*regparms*/,
"amd64g_dirtyhelper_FLDENV",
&amd64g_dirtyhelper_FLDENV,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
d->tmp = w64;
/* declare we're reading memory */
0/*regparms*/,
"amd64g_dirtyhelper_FSTENV",
&amd64g_dirtyhelper_FSTENV,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
d->mFx = Ifx_Write;
0/*regparms*/,
"amd64g_dirtyhelper_FINIT",
&amd64g_dirtyhelper_FINIT,
- mkIRExprVec_1( IRExprP__BBPTR )
+ mkIRExprVec_1( IRExpr_BBPTR() )
);
/* declare we're writing guest state */
0/*regparms*/,
"amd64g_dirtyhelper_FRSTOR",
&amd64g_dirtyhelper_FRSTOR,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
d->mSize = 108;
}
0/*regparms*/,
"amd64g_dirtyhelper_FNSAVE",
&amd64g_dirtyhelper_FNSAVE,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
d->mSize = 108;
}
0/*regparms*/,
"amd64g_dirtyhelper_FXSAVE",
&amd64g_dirtyhelper_FXSAVE,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
0/*regparms*/,
"amd64g_dirtyhelper_FXRSTOR",
&amd64g_dirtyhelper_FXRSTOR,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're reading memory */
IRExpr* gstOffLe = mkU64(gstOffL);
IRExpr* gstOffRe = mkU64(gstOffR);
IRExpr** args
- = mkIRExprVec_5( IRExprP__BBPTR, opc4, gstOffDe, gstOffLe, gstOffRe );
+ = mkIRExprVec_5( IRExpr_BBPTR(), opc4, gstOffDe, gstOffLe, gstOffRe );
IRDirty* d = unsafeIRDirty_0_N( 0/*regparms*/, nm, fn, args );
/* It's not really a dirty call, but we can't use the clean helper
IRExpr* gstOffLe = mkU64(gstOffL);
IRExpr* gstOffRe = mkU64(gstOffR);
IRExpr** args
- = mkIRExprVec_4( IRExprP__BBPTR, imme, gstOffLe, gstOffRe );
+ = mkIRExprVec_4( IRExpr_BBPTR(), imme, gstOffLe, gstOffRe );
IRDirty* d = unsafeIRDirty_0_N( 0/*regparms*/, nm, fn, args );
/* It's not really a dirty call, but we can't use the clean helper
IRExpr* edxIN = isISTRx ? mkU64(0) : getIRegRDX(8);
IRExpr* eaxIN = isISTRx ? mkU64(0) : getIRegRAX(8);
IRExpr** args
- = mkIRExprVec_6( IRExprP__BBPTR,
+ = mkIRExprVec_6( IRExpr_BBPTR(),
opc4_and_imm, gstOffLe, gstOffRe, edxIN, eaxIN );
IRTemp resT = newTemp(Ity_I64);
void* fAddr = &amd64g_dirtyhelper_RDTSCP;
IRDirty* d
= unsafeIRDirty_0_N ( 0/*regparms*/,
- fName, fAddr, mkIRExprVec_1(IRExprP__BBPTR) );
+ fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
/* declare guest state effects */
d->nFxState = 3;
vex_bzero(&d->fxState, sizeof(d->fxState));
vassert(fName); vassert(fAddr);
d = unsafeIRDirty_0_N ( 0/*regparms*/,
- fName, fAddr, mkIRExprVec_1(IRExprP__BBPTR) );
+ fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
/* declare guest state effects */
d->nFxState = 4;
vex_bzero(&d->fxState, sizeof(d->fxState));
if (rs == 0) { /* MFC0 */
DIP("mfc0 r%d, r%d, %d", rt, rd, sel);
IRTemp val = newTemp(Ity_I32);
- IRExpr** args = mkIRExprVec_3 (IRExprP__BBPTR, mkU32(rd), mkU32(sel));
+ IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(), mkU32(rd), mkU32(sel));
IRDirty *d = unsafeIRDirty_1_N(val,
0,
"mips32_dirtyhelper_mfc0",
/* Doubleword Move from Coprocessor 0 - DMFC0; MIPS64 */
DIP("dmfc0 r%d, r%d, %d", rt, rd, sel);
IRTemp val = newTemp(Ity_I64);
- IRExpr** args = mkIRExprVec_3 (IRExprP__BBPTR, mkU64(rd), mkU64(sel));
+ IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(), mkU64(rd), mkU64(sel));
IRDirty *d = unsafeIRDirty_1_N(val,
0,
"mips64_dirtyhelper_dmfc0",
#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
} else if (rd == 1) {
IRTemp val = newTemp(Ity_I64);
- IRExpr** args = mkIRExprVec_3 (IRExprP__BBPTR,
+ IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(),
mkU64(rt), mkU64(rd));
IRDirty *d = unsafeIRDirty_1_N(val,
0,
IRDirty* d;
UInt vD_off = vectorGuestRegOffset(vD_addr);
IRExpr** args = mkIRExprVec_4(
- IRExprP__BBPTR,
+ IRExpr_BBPTR(),
mkU32(vD_off),
binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
IRDirty* d;
UInt vD_off = vectorGuestRegOffset(vD_addr);
IRExpr** args = mkIRExprVec_4(
- IRExprP__BBPTR,
+ IRExpr_BBPTR(),
mkU32(vD_off),
binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
IRDirty *d;
IRTemp cc = newTemp(Ity_I64);
- /* IRExprP__BBPTR => Need to pass pointer to guest state to helper */
+ /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper */
d = unsafeIRDirty_1_N(cc, 0, "s390x_dirtyhelper_STFLE",
&s390x_dirtyhelper_STFLE,
- mkIRExprVec_2(IRExprP__BBPTR, mkexpr(op2addr)));
+ mkIRExprVec_2(IRExpr_BBPTR(), mkexpr(op2addr)));
d->nFxState = 1;
vex_bzero(&d->fxState, sizeof(d->fxState));
0/*regparms*/,
"x86g_dirtyhelper_FLDENV",
&x86g_dirtyhelper_FLDENV,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
d->tmp = ew;
/* declare we're reading memory */
0/*regparms*/,
"x86g_dirtyhelper_FSTENV",
&x86g_dirtyhelper_FSTENV,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
d->mFx = Ifx_Write;
0/*regparms*/,
"x86g_dirtyhelper_FINIT",
&x86g_dirtyhelper_FINIT,
- mkIRExprVec_1(IRExprP__BBPTR)
+ mkIRExprVec_1(IRExpr_BBPTR())
);
/* declare we're writing guest state */
0/*regparms*/,
"x86g_dirtyhelper_FRSTOR",
&x86g_dirtyhelper_FRSTOR,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
d->tmp = ew;
/* declare we're reading memory */
0/*regparms*/,
"x86g_dirtyhelper_FSAVE",
&x86g_dirtyhelper_FSAVE,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
d->mFx = Ifx_Write;
0/*regparms*/,
"x86g_dirtyhelper_FXSAVE",
&x86g_dirtyhelper_FXSAVE,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
0/*regparms*/,
"x86g_dirtyhelper_FXRSTOR",
&x86g_dirtyhelper_FXRSTOR,
- mkIRExprVec_2( IRExprP__BBPTR, mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
);
/* declare we're reading memory */
vassert(fName); vassert(fAddr);
d = unsafeIRDirty_0_N ( 0/*regparms*/,
- fName, fAddr, mkIRExprVec_1(IRExprP__BBPTR) );
+ fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
/* declare guest state effects */
d->nFxState = 4;
vex_bzero(&d->fxState, sizeof(d->fxState));
IRExpr* e )
{
/* Per comments in doHelperCall below, appearance of
- IRExprP__VECRET implies ill-formed IR. */
- vassert(e != IRExprP__VECRET);
+ IRExpr_VECRET() implies ill-formed IR. */
+ vassert(e->tag != Iex_VECRET);
/* In this case we give out a copy of the BaseBlock pointer. */
- if (UNLIKELY(e == IRExprP__BBPTR)) {
+ if (UNLIKELY(e->tag == Iex_BBPTR)) {
return mk_iMOVsd_RR( hregAMD64_RBP(), dst );
}
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExprP__VECRET and IRExprP__BBPTR are observed. */
+ the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
UInt nVECRETs = 0;
UInt nBBPTRs = 0;
The return type can be I{64,32,16,8} or V{128,256}. In the
latter two cases, it is expected that |args| will contain the
- special value IRExprP__VECRET, in which case this routine
+ special node IRExpr_VECRET(), in which case this routine
generates code to allocate space on the stack for the vector
return value. Since we are not passing any scalars on the
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExprP__BBPTR, in which case the
+ |args| may also contain IRExpr_BBPTR(), in which case the
value in %rbp is passed as the corresponding argument.
Generating code which is both efficient and correct when
unconditional calls may use the fast scheme, since having to
compute a condition expression could itself trash real
registers. Note that for simplicity, in the case where
- IRExprP__VECRET is present, we use the slow scheme. This is
+ IRExpr_VECRET() is present, we use the slow scheme. This is
motivated by the desire to avoid any possible complexity
w.r.t. nested calls.
/* FAST SCHEME */
/* In this loop, we process args that can be computed into the
destination (real) register with a single instruction, without
- using any fixed regs. That also includes IRExprP__BBPTR, but
- not IRExprP__VECRET. Indeed, if the IR is well-formed, we can
- never see IRExprP__VECRET at this point, since the return-type
+ using any fixed regs. That also includes IRExpr_BBPTR(), but
+ not IRExpr_VECRET(). Indeed, if the IR is well-formed, we can
+ never see IRExpr_VECRET() at this point, since the return-type
check above should ensure all those cases use the slow scheme
instead. */
vassert(n_args >= 0 && n_args <= 6);
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg))) {
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
}
fastinstrs[i]
vassert(n_args >= 0 && n_args <= 6);
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_BBPTR)) {
tmpregs[i] = newVRegI(env);
addInstr(env, mk_iMOVsd_RR( hregAMD64_RBP(), tmpregs[i]));
nBBPTRs++;
}
- else if (UNLIKELY(arg == IRExprP__VECRET)) {
+ else if (UNLIKELY(arg->tag == Iex_VECRET)) {
/* We stashed the address of the return slot earlier, so just
retrieve it now. */
vassert(!hregIsInvalid(r_vecRetAddr));
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
- if (UNLIKELY(is_IRExprP__VECRET_or_BBPTR(e))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
// These are always "safe" -- either a copy of r13(sp) in some
// arbitrary vreg, or a copy of r8, respectively.
return False;
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExprP__VECRET and IRExprP__BBPTR are observed. */
+ the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
UInt nVECRETs = 0;
UInt nBBPTRs = 0;
supported arg types are I32 and I64.
The return type can be I{64,32} or V128. In the V128 case, it
- is expected that |args| will contain the special value
- IRExprP__VECRET, in which case this routine generates code to
+ is expected that |args| will contain the special node
+ IRExpr_VECRET(), in which case this routine generates code to
allocate space on the stack for the vector return value. Since
we are not passing any scalars on the stack, it is enough to
preallocate the return space before marshalling any arguments,
in this case.
- |args| may also contain IRExprP__BBPTR, in which case the
+ |args| may also contain IRExpr_BBPTR(), in which case the
value in r8 is passed as the corresponding argument.
Generating code which is both efficient and correct when
n_args = 0;
for (i = 0; args[i]; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(arg == IRExprP__VECRET)) {
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
nBBPTRs++;
}
n_args++;
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (nextArgReg >= ARM_N_ARGREGS)
addInstr(env, mk_iMOVds_RR( argregs[nextArgReg], raHi ));
nextArgReg++;
}
- else if (arg == IRExprP__BBPTR) {
+ else if (arg->tag == Iex_BBPTR) {
vassert(0); //ATC
addInstr(env, mk_iMOVds_RR( argregs[nextArgReg],
hregARM_R8() ));
nextArgReg++;
}
- else if (arg == IRExprP__VECRET) {
+ else if (arg->tag == Iex_VECRET) {
// If this happens, it denotes ill-formed IR
vassert(0);
}
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (nextArgReg >= ARM_N_ARGREGS)
tmpregs[nextArgReg] = raHi;
nextArgReg++;
}
- else if (arg == IRExprP__BBPTR) {
+ else if (arg->tag == Iex_BBPTR) {
vassert(0); //ATC
tmpregs[nextArgReg] = hregARM_R8();
nextArgReg++;
}
- else if (arg == IRExprP__VECRET) {
+ else if (arg->tag == Iex_VECRET) {
// If this happens, it denotes ill-formed IR
vassert(0);
}
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExprP__VECRET and IRExprP__BBPTR are observed. */
+ the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
UInt nVECRETs = 0;
UInt nBBPTRs = 0;
/* The return type can be I{64,32,16,8} or V{128,256}. In the
latter two cases, it is expected that |args| will contain the
- special value IRExprP__VECRET, in which case this routine
+ special node IRExpr_VECRET(), in which case this routine
generates code to allocate space on the stack for the vector
return value. Since we are not passing any scalars on the
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExprP__BBPTR, in which case the value
+ |args| may also contain IRExpr_BBPTR(), in which case the value
in the guest state pointer register is passed as the
corresponding argument. */
n_args = 0;
for (i = 0; args[i]; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(arg == IRExprP__VECRET)) {
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
nBBPTRs++;
}
n_args++;
vassert(argreg < MIPS_N_REGPARMS);
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (aTy == Ity_I32 || mode64) {
argiregs |= (1 << (argreg + 4));
addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
argreg++;
- } else if (arg == IRExprP__BBPTR) {
+ } else if (arg->tag == Iex_BBPTR) {
vassert(0); // ATC
addInstr(env, mk_iMOVds_RR(argregs[argreg],
GuestStatePointer(mode64)));
argreg++;
- } else if (arg == IRExprP__VECRET) {
+ } else if (arg->tag == Iex_VECRET) {
// If this happens, it denotes ill-formed IR.
vassert(0);
}
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (aTy == Ity_I32 || mode64) {
argreg++;
tmpregs[argreg] = raHi;
argreg++;
- } else if (arg == IRExprP__BBPTR) {
+ } else if (arg->tag == Iex_BBPTR) {
vassert(0); // ATC
tmpregs[argreg] = GuestStatePointer(mode64);
argreg++;
}
- else if (arg == IRExprP__VECRET) {
+ else if (arg->tag == Iex_VECRET) {
// If this happens, it denotes ill-formed IR
vassert(0);
}
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExprP__VECRET and IRExprP__BBPTR are observed. */
+ the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
UInt nVECRETs = 0;
UInt nBBPTRs = 0;
The return type can be I{64,32,16,8} or V{128,256}. In the
latter two cases, it is expected that |args| will contain the
- special value IRExprP__VECRET, in which case this routine
+ special node IRExpr_VECRET(), in which case this routine
generates code to allocate space on the stack for the vector
return value. Since we are not passing any scalars on the
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExprP__BBPTR, in which case the value
+ |args| may also contain IRExpr_BBPTR(), in which case the value
in the guest state pointer register is passed as the
corresponding argument.
if (go_fast) {
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_BBPTR)) {
/* that's OK */
}
- else if (UNLIKELY(arg == IRExprP__VECRET)) {
+ else if (UNLIKELY(arg->tag == Iex_VECRET)) {
/* This implies ill-formed IR, since if the IR was
well-formed, the return-type test above would have
filtered it out. */
IRExpr* arg = args[i];
vassert(argreg < PPC_N_REGPARMS);
- if (arg == IRExprP__BBPTR) {
+ if (arg->tag == Iex_BBPTR) {
argiregs |= (1 << (argreg+3));
addInstr(env, mk_iMOVds_RR( argregs[argreg],
GuestStatePtr(mode64) ));
argreg++;
} else {
- vassert(arg != IRExprP__VECRET);
+ vassert(arg->tag != Iex_VECRET);
IRType ty = typeOfIRExpr(env->type_env, arg);
vassert(ty == Ity_I32 || ty == Ity_I64);
if (!mode64) {
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
vassert(argreg < PPC_N_REGPARMS);
- if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_BBPTR)) {
tmpregs[argreg] = newVRegI(env);
addInstr(env, mk_iMOVds_RR( tmpregs[argreg],
GuestStatePtr(mode64) ));
nBBPTRs++;
}
- else if (UNLIKELY(arg == IRExprP__VECRET)) {
+ else if (UNLIKELY(arg->tag == Iex_VECRET)) {
/* We stashed the address of the return slot earlier, so just
retrieve it now. */
vassert(!hregIsInvalid(r_vecRetAddr));
/* The return type can be I{64,32,16,8} or V{128,256}. In the
latter two cases, it is expected that |args| will contain the
- special value IRExprP__VECRET, in which case this routine
+ special node IRExpr_VECRET(), in which case this routine
generates code to allocate space on the stack for the vector
return value. Since we are not passing any scalars on the
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExprP__BBPTR, in which case the value
+ |args| may also contain IRExpr_BBPTR(), in which case the value
in the guest state pointer register is passed as the
corresponding argument.
These are used for cross-checking that IR-level constraints on
- the use of IRExprP__VECRET and IRExprP__BBPTR are observed. */
+ the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
UInt nVECRETs = 0;
UInt nBBPTRs = 0;
*/
Int arg_errors = 0;
for (i = 0; i < n_args; ++i) {
- if (UNLIKELY(args[i] == IRExprP__VECRET)) {
+ if (UNLIKELY(args[i]->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(args[i] == IRExprP__BBPTR)) {
+ } else if (UNLIKELY(args[i]->tag == Iex_BBPTR)) {
nBBPTRs++;
} else {
IRType type = typeOfIRExpr(env->type_env, args[i]);
/* Compute the function arguments into a temporary register each */
for (i = 0; i < n_args; i++) {
IRExpr *arg = args[i];
- if(UNLIKELY(arg == IRExprP__VECRET)) {
+ if(UNLIKELY(arg->tag == Iex_VECRET)) {
/* we do not handle vector types yet */
vassert(0);
addInstr(env, s390_insn_move(sizeof(ULong), tmpregs[argreg],
r_vecRetAddr));
- } else if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
/* If we need the guest state pointer put it in a temporary arg reg */
tmpregs[argreg] = newVRegI(env);
addInstr(env, s390_insn_move(sizeof(ULong), tmpregs[argreg],
/* Push an arg onto the host stack, in preparation for a call to a
helper function of some kind. Returns the number of 32-bit words
- pushed. If we encounter an IRExprP__VECRET then we expect that
+ pushed. If we encounter an IRExpr_VECRET() then we expect that
r_vecRetAddr will be a valid register, that holds the relevant
address.
*/
static Int pushArg ( ISelEnv* env, IRExpr* arg, HReg r_vecRetAddr )
{
- if (UNLIKELY(arg == IRExprP__VECRET)) {
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
vassert(0); //ATC
vassert(!hregIsInvalid(r_vecRetAddr));
addInstr(env, X86Instr_Push(X86RMI_Reg(r_vecRetAddr)));
return 1;
}
- if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_BBPTR)) {
addInstr(env, X86Instr_Push(X86RMI_Reg(hregX86_EBP())));
return 1;
}
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
- if (UNLIKELY(is_IRExprP__VECRET_or_BBPTR(e))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
// These are always "safe" -- either a copy of %esp in some
// arbitrary vreg, or a copy of %ebp, respectively.
return False;
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExprP__VECRET and IRExprP__BBPTR are observed. */
+ the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
UInt nVECRETs = 0;
UInt nBBPTRs = 0;
* The return type can be I{64,32,16,8} or V128. In the V128
case, it is expected that |args| will contain the special
- value IRExprP__VECRET, in which case this routine generates
+ node IRExpr_VECRET(), in which case this routine generates
code to allocate space on the stack for the vector return
value. Since we are not passing any scalars on the stack, it
is enough to preallocate the return space before marshalling
any arguments, in this case.
- |args| may also contain IRExprP__BBPTR, in which case the
+ |args| may also contain IRExpr_BBPTR(), in which case the
value in %ebp is passed as the corresponding argument.
* If the callee claims regparmness of 1, 2 or 3, we must pass the
while (args[n_args]) {
IRExpr* arg = args[n_args];
n_args++;
- if (UNLIKELY(arg == IRExprP__VECRET)) {
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
nBBPTRs++;
}
}
IRExpr* arg = args[i];
argreg--;
vassert(argreg >= 0);
- if (UNLIKELY(arg == IRExprP__VECRET)) {
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
vassert(0); //ATC
}
- else if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
vassert(0); //ATC
} else {
vassert(typeOfIRExpr(env->type_env, arg) == Ity_I32);
IRExpr* arg = args[i];
argreg--;
vassert(argreg >= 0);
- if (UNLIKELY(arg == IRExprP__VECRET)) {
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
vassert(!hregIsInvalid(r_vecRetAddr));
addInstr(env, X86Instr_Alu32R(Xalu_MOV,
X86RMI_Reg(r_vecRetAddr),
argregs[argreg]));
}
- else if (UNLIKELY(arg == IRExprP__BBPTR)) {
+ else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
vassert(0); //ATC
} else {
vassert(typeOfIRExpr(env->type_env, arg) == Ity_I32);
vex_printf("(");
for (i = 0; e->Iex.CCall.args[i] != NULL; i++) {
IRExpr* arg = e->Iex.CCall.args[i];
- /* We don't actually expect VECRET or BBPTR here -- BBPTR is
- never allowable; VECRET is in principle allowable but at
- present isn't supported. But they are handled for
- completeness anyway. */
- if (arg == IRExprP__VECRET) {
- vex_printf("VECRET");
- } else if (arg == IRExprP__BBPTR) {
- vex_printf("BBPTR");
- } else {
- ppIRExpr(arg);
- }
+ ppIRExpr(arg);
+
if (e->Iex.CCall.args[i+1] != NULL) {
vex_printf(",");
}
ppIRExpr(e->Iex.ITE.iffalse);
vex_printf(")");
break;
+ case Iex_VECRET:
+ vex_printf("VECRET");
+ break;
+ case Iex_BBPTR:
+ vex_printf("BBPTR");
+ break;
default:
vpanic("ppIRExpr");
}
vex_printf("(");
for (i = 0; d->args[i] != NULL; i++) {
IRExpr* arg = d->args[i];
- if (arg == IRExprP__VECRET) {
- vex_printf("VECRET");
- } else if (arg == IRExprP__BBPTR) {
- vex_printf("BBPTR");
- } else {
- ppIRExpr(arg);
- }
+ ppIRExpr(arg);
+
if (d->args[i+1] != NULL) {
vex_printf(",");
}
e->Iex.ITE.iffalse = iffalse;
return e;
}
+IRExpr* IRExpr_VECRET ( void ) {
+ IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ e->tag = Iex_VECRET;
+ return e;
+}
+IRExpr* IRExpr_BBPTR ( void ) {
+ IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ e->tag = Iex_BBPTR;
+ return e;
+}
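
Illustrative only: a minimal sketch of how a guest-to-IR call site builds a dirty call with the new constructor, patterned on the FSTENV-style sites updated above. The helper name, the addr temp, and the mSize value are placeholders, not part of this patch.

   /* Sketch (placeholder names): write-to-memory dirty helper taking the
      guest-state pointer plus an address, built with IRExpr_BBPTR(). */
   IRDirty* d = unsafeIRDirty_0_N (
                   0/*regparms*/,
                   "guest_dirtyhelper_EXAMPLE",      /* placeholder name */
                   &guest_dirtyhelper_EXAMPLE,       /* placeholder fn   */
                   mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
                );
   /* declare we're writing memory, as the real call sites do */
   d->mFx   = Ifx_Write;
   d->mAddr = mkexpr(addr);
   d->mSize = 28;                                    /* illustrative size */
   /* Because IRExpr_BBPTR() is present, the call must also declare its
      guest-state effects (d->nFxState / d->fxState), as the real sites do. */
   stmt( IRStmt_Dirty(d) );
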
/* Constructors for NULL-terminated IRExpr expression vectors,
return IRExpr_ITE(deepCopyIRExpr(e->Iex.ITE.cond),
deepCopyIRExpr(e->Iex.ITE.iftrue),
deepCopyIRExpr(e->Iex.ITE.iffalse));
+ case Iex_VECRET:
+ return IRExpr_VECRET();
+
+ case Iex_BBPTR:
+ return IRExpr_BBPTR();
+
default:
vpanic("deepCopyIRExpr");
}
/* return typeOfIRExpr(tyenv, e->Iex.ITE.iffalse); */
case Iex_Binder:
vpanic("typeOfIRExpr: Binder is not a valid expression");
+ case Iex_VECRET:
+ vpanic("typeOfIRExpr: VECRET is not a valid expression");
+ case Iex_BBPTR:
+ vpanic("typeOfIRExpr: BBPTR is not a valid expression");
default:
ppIRExpr(e);
vpanic("typeOfIRExpr");
*/
static inline Bool isIRAtom_or_VECRET_or_BBPTR ( IRExpr* e ) {
- /* Use this rather roundabout scheme so as to try and have the
- number of additional conditional branches be 1 in the common
- (non-VECRET, non-BBPTR) case, rather than 2. */
- if (UNLIKELY(((HWord)e) & 1)) {
- return e == IRExprP__VECRET || e == IRExprP__BBPTR;
- } else {
- return isIRAtom(e);
- }
+ if (isIRAtom(e)) {
+ return True;
+ }
+
+ return UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e));
}
Bool isFlatIRStmt ( IRStmt* st )
case Iex_CCall:
for (i = 0; expr->Iex.CCall.args[i]; i++) {
IRExpr* arg = expr->Iex.CCall.args[i];
- if (UNLIKELY(((HWord)arg) & 1)) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg))) {
/* These aren't allowed in CCall lists. Let's detect
and throw them out here, though, rather than
segfaulting a bit later on. */
d = stmt->Ist.Dirty.details;
for (i = 0; d->args[i] != NULL; i++) {
IRExpr* arg = d->args[i];
- if (UNLIKELY(((HWord)arg) & 1)) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg))) {
/* This is ensured by isFlatIRStmt */
- vassert(arg == IRExprP__VECRET || arg == IRExprP__BBPTR);
+ ;
} else {
useBeforeDef_Expr(bb,stmt,arg,def_counts);
}
if (i >= 32)
sanityCheckFail(bb,stmt,"Iex.CCall: > 32 args");
IRExpr* arg = expr->Iex.CCall.args[i];
- if (UNLIKELY(is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg)))
sanityCheckFail(bb,stmt,"Iex.CCall.args: is VECRET/BBPTR");
tcExpr(bb,stmt, arg, gWordTy);
}
if (i >= 32)
sanityCheckFail(bb,stmt,"IRStmt.Dirty: > 32 args");
IRExpr* arg = d->args[i];
- if (UNLIKELY(((HWord)arg) & 1)) {
- if (arg == IRExprP__VECRET) {
- nVECRETs++;
- } else
- if (arg == IRExprP__BBPTR) {
- nBBPTRs++;
- } else {
- /* The impossibility of failure is ensured by
- isFlatIRStmt */
- vassert(0);
- }
+ if (UNLIKELY(arg->tag == Iex_VECRET)) {
+ nVECRETs++;
+ } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ nBBPTRs++;
} else {
if (typeOfIRExpr(tyenv, arg) == Ity_I1)
sanityCheckFail(bb,stmt,"IRStmt.Dirty.arg[i] :: Ity_I1");
d2->guard = flatten_Expr(bb, d2->guard);
for (i = 0; d2->args[i]; i++) {
IRExpr* arg = d2->args[i];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
d2->args[i] = flatten_Expr(bb, arg);
}
addStmtToIRSB(bb, IRStmt_Dirty(d2));
d2->guard = fold_Expr(env, subst_Expr(env, d2->guard));
for (i = 0; d2->args[i]; i++) {
IRExpr* arg = d2->args[i];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg))) {
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
vassert(isIRAtom(arg));
d2->args[i] = fold_Expr(env, subst_Expr(env, arg));
}
addUses_Expr(set, d->guard);
for (i = 0; d->args[i] != NULL; i++) {
IRExpr* arg = d->args[i];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
addUses_Expr(set, arg);
}
return;
aoccCount_Expr(uses, d->guard);
for (i = 0; d->args[i]; i++) {
IRExpr* arg = d->args[i];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
aoccCount_Expr(uses, arg);
}
return;
d2->guard = atbSubst_Expr(env, d2->guard);
for (i = 0; d2->args[i]; i++) {
IRExpr* arg = d2->args[i];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
d2->args[i] = atbSubst_Expr(env, arg);
}
return IRStmt_Dirty(d2);
guest state under the covers. It's not allowed, but let's be
extra conservative and assume the worst. */
for (i = 0; d->args[i]; i++) {
- if (UNLIKELY(d->args[i] == IRExprP__BBPTR)) {
+ if (UNLIKELY(d->args[i]->tag == Iex_BBPTR)) {
*requiresPreciseMemExns = True;
return True;
}
vassert(isIRAtom(d->guard));
for (j = 0; d->args[j]; j++) {
IRExpr* arg = d->args[j];
- if (LIKELY(!is_IRExprP__VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
vassert(isIRAtom(arg));
}
if (d->mFx != Ifx_None)
Iex_Load,
Iex_Const,
Iex_ITE,
- Iex_CCall
+ Iex_CCall,
+ Iex_VECRET,
+ Iex_BBPTR
}
IRExprTag;
quite poor code to be generated. Try to avoid it.
In principle it would be allowable to have the arg vector
- contain the special value IRExprP__VECRET, although not
- IRExprP__BBPTR. However, at the moment there is no
- requirement for clean helper calls to be able to return V128
- or V256 values. Hence this is not allowed.
+ contain an IRExpr_VECRET(), although not IRExpr_BBPTR(). However,
+ at the moment there is no requirement for clean helper calls to
+ be able to return V128 or V256 values. Hence this is not allowed.
ppIRExpr output: <cee>(<args>):<retty>
eg. foo{0x80489304}(t1, t2):I32
};
-/* Two special constants of type IRExpr*, which can ONLY be used in
+/* Two special kinds of IRExpr, which can ONLY be used in
argument lists for dirty helper calls (IRDirty.args) and in NO
- OTHER PLACES. And then only in very limited ways. These constants
- are not pointer-aligned and hence can't be confused with real
- IRExpr*s nor with NULL. */
+ OTHER PLACES. And then only in very limited ways. */
/* Denotes an argument which (in the helper) takes a pointer to a
(naturally aligned) V128 or V256, into which the helper is expected
- to write its result. Use of IRExprP__VECRET is strictly
+ to write its result. Use of IRExpr_VECRET() is strictly
controlled. If the helper returns a V128 or V256 value then
- IRExprP__VECRET must appear exactly once in the arg list, although
+ IRExpr_VECRET() must appear exactly once in the arg list, although
it can appear anywhere, and the helper must have a C 'void' return
- type. If the helper returns any other type, IRExprP__VECRET may
+ type. If the helper returns any other type, IRExpr_VECRET() may
not appear in the argument list. */
-#define IRExprP__VECRET ((IRExpr*)9)
/* Denotes a void* argument which is passed to the helper, which at
run time will point to the thread's guest state area. This can
only appear at most once in an argument list, and it may not appear
at all in argument lists for clean helper calls. */
-#define IRExprP__BBPTR ((IRExpr*)17)
-static inline Bool is_IRExprP__VECRET_or_BBPTR ( IRExpr* e ) {
- return e == IRExprP__VECRET || e == IRExprP__BBPTR;
+static inline Bool is_IRExpr_VECRET_or_BBPTR ( IRExpr* e ) {
+ return e->tag == Iex_VECRET || e->tag == Iex_BBPTR;
}
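
Illustrative only: a hedged sketch of the V128-return convention described above. The helper name and its scalar argument are hypothetical; the point is that the result temp is Ity_V128, IRExpr_VECRET() appears exactly once in the argument list, and the C helper itself has a void return type, writing its result through the pointer passed for that slot.

   /* Sketch (hypothetical helper): result is delivered via the
      IRExpr_VECRET() slot rather than a scalar return value. */
   IRTemp res = newTemp(Ity_V128);
   IRDirty* d = unsafeIRDirty_1_N (
                   res, 0/*regparms*/,
                   "guest_dirtyhelper_EXAMPLE_V128", /* hypothetical name */
                   &guest_dirtyhelper_EXAMPLE_V128,  /* hypothetical fn   */
                   mkIRExprVec_2( IRExpr_VECRET(), mkexpr(someArg) )
                );
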
extern IRExpr* IRExpr_Const ( IRConst* con );
extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
+extern IRExpr* IRExpr_VECRET ( void );
+extern IRExpr* IRExpr_BBPTR ( void );
/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( IRExpr* );
number of times at a fixed interval, if required.
Normally, code is generated to pass just the args to the helper.
- However, if IRExprP__BBPTR is present in the argument list (at most
+ However, if IRExpr_BBPTR() is present in the argument list (at most
one instance is allowed), then the baseblock pointer is passed for
that arg, so that the callee can access the guest state. It is
- invalid for .nFxState to be zero but IRExprP__BBPTR to be present,
+ invalid for .nFxState to be zero but IRExpr_BBPTR() to be present,
since .nFxState==0 is a claim that the call does not access guest
state.
allowed. */
IRCallee* cee; /* where to call */
IRExpr* guard; /* :: Ity_Bit. Controls whether call happens */
- /* The args vector may contain IRExprP__BBPTR and/or
- IRExprP__VECRET, in both cases, at most once. */
+ /* The args vector may contain IRExpr_BBPTR() and/or
+ IRExpr_VECRET(), in both cases, at most once. */
IRExpr** args; /* arg vector, ends in NULL. */
IRTemp tmp; /* to assign result to, or IRTemp_INVALID if none */