Rename the VEX IR marker IRExpr_BBPTR / Iex_BBPTR to IRExpr_GSPTR / Iex_GSPTR: the value handed to helpers is the guest state pointer, not a baseblock pointer. Comments and the nBBPTRs bookkeeping in the instruction selectors are renamed to match. Fixes BZ#373555.
git-svn-id: svn://svn.valgrind.org/vex/trunk@3287
0/*regparms*/,
"amd64g_dirtyhelper_FINIT",
&amd64g_dirtyhelper_FINIT,
- mkIRExprVec_1( IRExpr_BBPTR() )
+ mkIRExprVec_1( IRExpr_GSPTR() )
);
/* declare we're writing guest state */
0/*regparms*/,
"amd64g_dirtyhelper_FLDENV",
&amd64g_dirtyhelper_FLDENV,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d->tmp = w64;
/* declare we're reading memory */
0/*regparms*/,
"amd64g_dirtyhelper_FSTENV",
&amd64g_dirtyhelper_FSTENV,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
d->mFx = Ifx_Write;
0/*regparms*/,
"amd64g_dirtyhelper_FRSTOR",
&amd64g_dirtyhelper_FRSTOR,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d->mSize = 108;
}
0/*regparms*/,
"amd64g_dirtyhelper_FNSAVE",
&amd64g_dirtyhelper_FNSAVE,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d->mSize = 108;
}
0/*regparms*/,
"amd64g_dirtyhelper_XSAVE_COMPONENT_0",
&amd64g_dirtyhelper_XSAVE_COMPONENT_0,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d0->guard = binop(Iop_CmpEQ64, binop(Iop_And64, mkexpr(rfbm), mkU64(1)),
mkU64(1));
0/*regparms*/,
"amd64g_dirtyhelper_XSAVE_COMPONENT_1_EXCLUDING_XMMREGS",
&amd64g_dirtyhelper_XSAVE_COMPONENT_1_EXCLUDING_XMMREGS,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d1->guard = guard_1or2;
0/*regparms*/,
"amd64g_dirtyhelper_XRSTOR_COMPONENT_0",
&amd64g_dirtyhelper_XRSTOR_COMPONENT_0,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d0->guard = binop(Iop_CmpNE64, mkexpr(restore_0), mkU64(0));
0/*regparms*/,
"amd64g_dirtyhelper_XRSTOR_COMPONENT_1_EXCLUDING_XMMREGS",
&amd64g_dirtyhelper_XRSTOR_COMPONENT_1_EXCLUDING_XMMREGS,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
) ;
d1->guard = restore_1or2e;
IRExpr* gstOffLe = mkU64(gstOffL);
IRExpr* gstOffRe = mkU64(gstOffR);
IRExpr** args
- = mkIRExprVec_5( IRExpr_BBPTR(), opc4, gstOffDe, gstOffLe, gstOffRe );
+ = mkIRExprVec_5( IRExpr_GSPTR(), opc4, gstOffDe, gstOffLe, gstOffRe );
IRDirty* d = unsafeIRDirty_0_N( 0/*regparms*/, nm, fn, args );
/* It's not really a dirty call, but we can't use the clean helper
IRExpr* gstOffLe = mkU64(gstOffL);
IRExpr* gstOffRe = mkU64(gstOffR);
IRExpr** args
- = mkIRExprVec_4( IRExpr_BBPTR(), imme, gstOffLe, gstOffRe );
+ = mkIRExprVec_4( IRExpr_GSPTR(), imme, gstOffLe, gstOffRe );
IRDirty* d = unsafeIRDirty_0_N( 0/*regparms*/, nm, fn, args );
/* It's not really a dirty call, but we can't use the clean helper
IRExpr* edxIN = isISTRx ? mkU64(0) : getIRegRDX(8);
IRExpr* eaxIN = isISTRx ? mkU64(0) : getIRegRAX(8);
IRExpr** args
- = mkIRExprVec_6( IRExpr_BBPTR(),
+ = mkIRExprVec_6( IRExpr_GSPTR(),
opc4_and_imm, gstOffLe, gstOffRe, edxIN, eaxIN );
IRTemp resT = newTemp(Ity_I64);
void* fAddr = &amd64g_dirtyhelper_RDTSCP;
IRDirty* d
= unsafeIRDirty_0_N ( 0/*regparms*/,
- fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
+ fName, fAddr, mkIRExprVec_1(IRExpr_GSPTR()) );
/* declare guest state effects */
d->nFxState = 3;
vex_bzero(&d->fxState, sizeof(d->fxState));
vassert(fName); vassert(fAddr);
d = unsafeIRDirty_0_N ( 0/*regparms*/,
- fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
+ fName, fAddr, mkIRExprVec_1(IRExpr_GSPTR()) );
/* declare guest state effects */
d->nFxState = 4;
vex_bzero(&d->fxState, sizeof(d->fxState));
{
IRDirty *d;
IRTemp fcsr = newTemp(Ity_I32);
- /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper. */
+ /* IRExpr_GSPTR() => Need to pass pointer to guest state to helper. */
if (fp_mode64)
d = unsafeIRDirty_1_N(fcsr, 0,
"mips_dirtyhelper_calculate_FCSR_fp64",
&mips_dirtyhelper_calculate_FCSR_fp64,
- mkIRExprVec_4(IRExpr_BBPTR(),
+ mkIRExprVec_4(IRExpr_GSPTR(),
mkU32(fs),
mkU32(ft),
mkU32(inst)));
d = unsafeIRDirty_1_N(fcsr, 0,
"mips_dirtyhelper_calculate_FCSR_fp32",
&mips_dirtyhelper_calculate_FCSR_fp32,
- mkIRExprVec_4(IRExpr_BBPTR(),
+ mkIRExprVec_4(IRExpr_GSPTR(),
mkU32(fs),
mkU32(ft),
mkU32(inst)));
IRDirty* d;
UInt vD_off = vectorGuestRegOffset(vD_addr);
IRExpr** args_be = mkIRExprVec_5(
- IRExpr_BBPTR(),
+ IRExpr_GSPTR(),
mkU32(vD_off),
binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
mkU32(0)/*left*/,
mkU32(1)/*Big Endian*/);
IRExpr** args_le = mkIRExprVec_5(
- IRExpr_BBPTR(),
+ IRExpr_GSPTR(),
mkU32(vD_off),
binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
IRDirty* d;
UInt vD_off = vectorGuestRegOffset(vD_addr);
IRExpr** args_be = mkIRExprVec_5(
- IRExpr_BBPTR(),
+ IRExpr_GSPTR(),
mkU32(vD_off),
binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
mkU32(1)/*right*/,
mkU32(1)/*Big Endian*/);
IRExpr** args_le = mkIRExprVec_5(
- IRExpr_BBPTR(),
+ IRExpr_GSPTR(),
mkU32(vD_off),
binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
mkU32(0xF)),
IRDirty *d;
IRTemp cc = newTemp(Ity_I64);
- /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper */
+ /* IRExpr_GSPTR() => Need to pass pointer to guest state to helper */
d = unsafeIRDirty_1_N(cc, 0, "s390x_dirtyhelper_STFLE",
&s390x_dirtyhelper_STFLE,
- mkIRExprVec_2(IRExpr_BBPTR(), mkexpr(op2addr)));
+ mkIRExprVec_2(IRExpr_GSPTR(), mkexpr(op2addr)));
d->nFxState = 1;
vex_bzero(&d->fxState, sizeof(d->fxState));
0/*regparms*/,
"x86g_dirtyhelper_FLDENV",
&x86g_dirtyhelper_FLDENV,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d->tmp = ew;
/* declare we're reading memory */
0/*regparms*/,
"x86g_dirtyhelper_FSTENV",
&x86g_dirtyhelper_FSTENV,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
d->mFx = Ifx_Write;
0/*regparms*/,
"x86g_dirtyhelper_FINIT",
&x86g_dirtyhelper_FINIT,
- mkIRExprVec_1(IRExpr_BBPTR())
+ mkIRExprVec_1(IRExpr_GSPTR())
);
/* declare we're writing guest state */
0/*regparms*/,
"x86g_dirtyhelper_FRSTOR",
&x86g_dirtyhelper_FRSTOR,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
d->tmp = ew;
/* declare we're reading memory */
0/*regparms*/,
"x86g_dirtyhelper_FSAVE",
&x86g_dirtyhelper_FSAVE,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
d->mFx = Ifx_Write;
0/*regparms*/,
"x86g_dirtyhelper_FXSAVE",
&x86g_dirtyhelper_FXSAVE,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
/* declare we're writing memory */
0/*regparms*/,
"x86g_dirtyhelper_FXRSTOR",
&x86g_dirtyhelper_FXRSTOR,
- mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+ mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
);
/* declare we're reading memory */
vassert(fName); vassert(fAddr);
d = unsafeIRDirty_0_N ( 0/*regparms*/,
- fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
+ fName, fAddr, mkIRExprVec_1(IRExpr_GSPTR()) );
/* declare guest state effects */
d->nFxState = 4;
vex_bzero(&d->fxState, sizeof(d->fxState));
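(The guest-front-end hunks above all share one shape; the following is a minimal sketch of that shape gathered in one place, not code from this patch. "my_dirtyhelper", OFFB_SOMEREG and the mSize value are hypothetical, and mkexpr/stmt/addr are assumed to be the file-local helpers and effective-address temp that the guest_*_toIR.c files already provide.)

   /* Sketch: a dirty helper call that receives the guest state pointer. */
   IRDirty* d = unsafeIRDirty_0_N(
                   0/*regparms*/,
                   "my_dirtyhelper", &my_dirtyhelper,
                   mkIRExprVec_2( IRExpr_GSPTR(), mkexpr(addr) )
                );
   /* Declare the memory effect ... */
   d->mFx   = Ifx_Read;
   d->mAddr = mkexpr(addr);
   d->mSize = 28;
   /* ... and the guest-state effect.  Passing IRExpr_GSPTR() with
      nFxState == 0 is rejected by the IR sanity checker. */
   d->nFxState = 1;
   vex_bzero(&d->fxState, sizeof(d->fxState));
   d->fxState[0].fx     = Ifx_Write;
   d->fxState[0].offset = OFFB_SOMEREG;
   d->fxState[0].size   = 8;
   stmt( IRStmt_Dirty(d) );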
vassert(e->tag != Iex_VECRET);
/* In this case we give out a copy of the BaseBlock pointer. */
- if (UNLIKELY(e->tag == Iex_BBPTR)) {
+ if (UNLIKELY(e->tag == Iex_GSPTR)) {
return mk_iMOVsd_RR( hregAMD64_RBP(), dst );
}
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+ the use of IRExpr_VECRET() and IRExpr_GSPTR() are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* Marshal args for a call and do the call.
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExpr_BBPTR(), in which case the
+ |args| may also contain IRExpr_GSPTR(), in which case the
value in %rbp is passed as the corresponding argument.
Generating code which is both efficient and correct when
/* FAST SCHEME */
/* In this loop, we process args that can be computed into the
destination (real) register with a single instruction, without
- using any fixed regs. That also includes IRExpr_BBPTR(), but
+ using any fixed regs. That also includes IRExpr_GSPTR(), but
not IRExpr_VECRET(). Indeed, if the IR is well-formed, we can
never see IRExpr_VECRET() at this point, since the return-type
check above should ensure all those cases use the slow scheme
vassert(n_args >= 0 && n_args <= 6);
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg))) {
vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
}
fastinstrs[i]
vassert(n_args >= 0 && n_args <= 6);
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_GSPTR)) {
tmpregs[i] = newVRegI(env);
addInstr(env, mk_iMOVsd_RR( hregAMD64_RBP(), tmpregs[i]));
- nBBPTRs++;
+ nGSPTRs++;
}
else if (UNLIKELY(arg->tag == Iex_VECRET)) {
/* We stashed the address of the return slot earlier, so just
vassert(nVECRETs == 0);
}
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
vassert(*stackAdjustAfterCall == 0);
vassert(is_RetLoc_INVALID(*retloc));
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(e))) {
// These are always "safe" -- either a copy of SP in some
// arbitrary vreg, or a copy of x21, respectively.
return False;
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+ the use of IRExpr_VECRET() and IRExpr_GSPTR() are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* Marshal args for a call and do the call.
preallocate the return space before marshalling any arguments,
in this case.
- |args| may also contain IRExpr_BBPTR(), in which case the
+ |args| may also contain IRExpr_GSPTR(), in which case the
value in x21 is passed as the corresponding argument.
Generating code which is both efficient and correct when
IRExpr* arg = args[i];
if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
+ nGSPTRs++;
}
n_args++;
}
/* If this fails, the IR is ill-formed */
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
/* If we have a VECRET, allocate space on the stack for the return
value, and record the stack pointer after that. */
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aTy = typeOfIRExpr(env->type_env, args[i]);
if (nextArgReg >= ARM64_N_ARGREGS)
iselIntExpr_R(env, args[i]) ));
nextArgReg++;
}
- else if (arg->tag == Iex_BBPTR) {
+ else if (arg->tag == Iex_GSPTR) {
vassert(0); //ATC
addInstr(env, ARM64Instr_MovI( argregs[nextArgReg],
hregARM64_X21() ));
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aTy = typeOfIRExpr(env->type_env, args[i]);
if (nextArgReg >= ARM64_N_ARGREGS)
tmpregs[nextArgReg] = iselIntExpr_R(env, args[i]);
nextArgReg++;
}
- else if (arg->tag == Iex_BBPTR) {
+ else if (arg->tag == Iex_GSPTR) {
vassert(0); //ATC
tmpregs[nextArgReg] = hregARM64_X21();
nextArgReg++;
/* Do final checks, set the return values, and generate the call
instruction proper. */
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
vassert(nVECRETs == (retTy == Ity_V128 || retTy == Ity_V256) ? 1 : 0);
vassert(*stackAdjustAfterCall == 0);
vassert(is_RetLoc_INVALID(*retloc));
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(e))) {
// These are always "safe" -- either a copy of r13(sp) in some
// arbitrary vreg, or a copy of r8, respectively.
return False;
UInt n_real_args = 0;
for (i = 1; args[i]; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(arg)))
goto no_match;
IRType argTy = typeOfIRExpr(env->type_env, arg);
if (UNLIKELY(argTy != Ity_I32))
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+ the use of IRExpr_VECRET() and IRExpr_GSPTR() are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* Marshal args for a call and do the call.
preallocate the return space before marshalling any arguments,
in this case.
- |args| may also contain IRExpr_BBPTR(), in which case the
+ |args| may also contain IRExpr_GSPTR(), in which case the
value in r8 is passed as the corresponding argument.
Generating code which is both efficient and correct when
IRExpr* arg = args[i];
if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
+ nGSPTRs++;
}
n_args++;
}
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (nextArgReg >= ARM_N_ARGREGS)
addInstr(env, mk_iMOVds_RR( argregs[nextArgReg], raHi ));
nextArgReg++;
}
- else if (arg->tag == Iex_BBPTR) {
+ else if (arg->tag == Iex_GSPTR) {
vassert(0); //ATC
addInstr(env, mk_iMOVds_RR( argregs[nextArgReg],
hregARM_R8() ));
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (nextArgReg >= ARM_N_ARGREGS)
tmpregs[nextArgReg] = raHi;
nextArgReg++;
}
- else if (arg->tag == Iex_BBPTR) {
+ else if (arg->tag == Iex_GSPTR) {
vassert(0); //ATC
tmpregs[nextArgReg] = hregARM_R8();
nextArgReg++;
/* Do final checks, set the return values, and generate the call
instruction proper. */
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
vassert(nVECRETs == (retTy == Ity_V128 || retTy == Ity_V256) ? 1 : 0);
vassert(*stackAdjustAfterCall == 0);
vassert(is_RetLoc_INVALID(*retloc));
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+ the use of IRExpr_VECRET() and IRExpr_GSPTR() are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* MIPS O32 calling convention: up to four registers ($a0 ... $a3)
are allowed to be used for passing integer arguments. They correspond
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExpr_BBPTR(), in which case the value
+ |args| may also contain IRExpr_GSPTR(), in which case the value
in the guest state pointer register is passed as the
corresponding argument. */
IRExpr* arg = args[i];
if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
+ nGSPTRs++;
}
n_args++;
}
vassert(argreg < MIPS_N_REGPARMS);
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
if (aTy == Ity_I32 || mode64) {
argiregs |= (1 << (argreg + 4));
addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
argreg++;
- } else if (arg->tag == Iex_BBPTR) {
+ } else if (arg->tag == Iex_GSPTR) {
vassert(0); // ATC
addInstr(env, mk_iMOVds_RR(argregs[argreg],
GuestStatePointer(mode64)));
IRExpr* arg = args[i];
IRType aTy = Ity_INVALID;
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aTy = typeOfIRExpr(env->type_env, arg);
- if (aTy == Ity_I32 || (mode64 && arg->tag != Iex_BBPTR)) {
+ if (aTy == Ity_I32 || (mode64 && arg->tag != Iex_GSPTR)) {
tmpregs[argreg] = iselWordExpr_R(env, arg);
argreg++;
} else if (aTy == Ity_I64) { /* Ity_I64 */
argreg++;
tmpregs[argreg] = raHi;
argreg++;
- } else if (arg->tag == Iex_BBPTR) {
+ } else if (arg->tag == Iex_GSPTR) {
tmpregs[argreg] = GuestStatePointer(mode64);
argreg++;
}
/* Do final checks, set the return values, and generate the call
instruction proper. */
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
vassert(nVECRETs == (retTy == Ity_V128 || retTy == Ity_V256) ? 1 : 0);
vassert(*stackAdjustAfterCall == 0);
vassert(is_RetLoc_INVALID(*retloc));
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+ the use of IRExpr_VECRET() and IRExpr_GSPTR() are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* Marshal args for a call and do the call.
stack, it is enough to preallocate the return space before
marshalling any arguments, in this case.
- |args| may also contain IRExpr_BBPTR(), in which case the value
+ |args| may also contain IRExpr_GSPTR(), in which case the value
in the guest state pointer register is passed as the
corresponding argument.
if (go_fast) {
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
- if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_GSPTR)) {
/* that's OK */
}
else if (UNLIKELY(arg->tag == Iex_VECRET)) {
IRExpr* arg = args[i];
vassert(argreg < PPC_N_REGPARMS);
- if (arg->tag == Iex_BBPTR) {
+ if (arg->tag == Iex_GSPTR) {
argiregs |= (1 << (argreg+3));
addInstr(env, mk_iMOVds_RR( argregs[argreg],
GuestStatePtr(mode64) ));
for (i = 0; i < n_args; i++) {
IRExpr* arg = args[i];
vassert(argreg < PPC_N_REGPARMS);
- if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_GSPTR)) {
tmpregs[argreg] = newVRegI(env);
addInstr(env, mk_iMOVds_RR( tmpregs[argreg],
GuestStatePtr(mode64) ));
- nBBPTRs++;
+ nGSPTRs++;
}
else if (UNLIKELY(arg->tag == Iex_VECRET)) {
/* We stashed the address of the return slot earlier, so just
vassert(nVECRETs == 0);
}
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
vassert(*stackAdjustAfterCall == 0);
vassert(is_RetLoc_INVALID(*retloc));
special node IRExpr_VECRET(). For s390, however, V128 and V256 return
values do not occur as we generally do not support vector types.
- |args| may also contain IRExpr_BBPTR(), in which case the value
+ |args| may also contain IRExpr_GSPTR(), in which case the value
in the guest state pointer register is passed as the
corresponding argument.
These are used for cross-checking that IR-level constraints on
- the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+ the use of IRExpr_VECRET() and IRExpr_GSPTR() are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
n_args = 0;
for (i = 0; args[i]; i++)
for (i = 0; i < n_args; ++i) {
if (UNLIKELY(args[i]->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(args[i]->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(args[i]->tag == Iex_GSPTR)) {
+ nGSPTRs++;
} else {
IRType type = typeOfIRExpr(env->type_env, args[i]);
if (type != Ity_I64) {
vpanic("cannot continue due to errors in argument passing");
/* If these fail, the IR is ill-formed */
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
vassert(nVECRETs == 0);
argreg = 0;
/* Compute the function arguments into a temporary register each */
for (i = 0; i < n_args; i++) {
IRExpr *arg = args[i];
- if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_GSPTR)) {
/* If we need the guest state pointer put it in a temporary arg reg */
tmpregs[argreg] = newVRegI(env);
addInstr(env, s390_insn_move(sizeof(ULong), tmpregs[argreg],
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* TILEGX calling convention: up to 10 registers (r0 ... r9)
are allowed to be used for passing integer arguments. They correspond
IRExpr* arg = args[i];
if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
+ nGSPTRs++;
}
}
- if (nVECRETs || nBBPTRs)
- vex_printf("nVECRETs=%u, nBBPTRs=%u\n",
- nVECRETs, nBBPTRs);
+ if (nVECRETs || nGSPTRs)
+ vex_printf("nVECRETs=%u, nGSPTRs=%u\n",
+ nVECRETs, nGSPTRs);
if (TILEGX_N_REGPARMS < n_args) {
vpanic("doHelperCall(TILEGX): cannot currently handle > 10 args");
addInstr(env, X86Instr_Push(X86RMI_Reg(r_vecRetAddr)));
return 1;
}
- if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ if (UNLIKELY(arg->tag == Iex_GSPTR)) {
addInstr(env, X86Instr_Push(X86RMI_Reg(hregX86_EBP())));
return 1;
}
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(e))) {
// These are always "safe" -- either a copy of %esp in some
// arbitrary vreg, or a copy of %ebp, respectively.
return False;
*retloc = mk_RetLoc_INVALID();
/* These are used for cross-checking that IR-level constraints on
- the use of Iex_VECRET and Iex_BBPTR are observed. */
+ the use of Iex_VECRET and Iex_GSPTR are observed. */
UInt nVECRETs = 0;
- UInt nBBPTRs = 0;
+ UInt nGSPTRs = 0;
/* Marshal args for a call, do the call, and clear the stack.
Complexities to consider:
is enough to preallocate the return space before marshalling
any arguments, in this case.
- |args| may also contain IRExpr_BBPTR(), in which case the
+ |args| may also contain IRExpr_GSPTR(), in which case the
value in %ebp is passed as the corresponding argument.
* If the callee claims regparmness of 1, 2 or 3, we must pass the
n_args++;
if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
+ nGSPTRs++;
}
}
/* If this fails, the IR is ill-formed */
- vassert(nBBPTRs == 0 || nBBPTRs == 1);
+ vassert(nGSPTRs == 0 || nGSPTRs == 1);
/* If we have a VECRET, allocate space on the stack for the return
value, and record the stack pointer after that. */
if (UNLIKELY(arg->tag == Iex_VECRET)) {
vassert(0); //ATC
}
- else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
vassert(0); //ATC
} else {
vassert(typeOfIRExpr(env->type_env, arg) == Ity_I32);
X86RMI_Reg(r_vecRetAddr),
argregs[argreg]));
}
- else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+ else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
vassert(0); //ATC
} else {
vassert(typeOfIRExpr(env->type_env, arg) == Ity_I32);
case Iex_VECRET:
vex_printf("VECRET");
break;
- case Iex_BBPTR:
- vex_printf("BBPTR");
+ case Iex_GSPTR:
+ vex_printf("GSPTR");
break;
default:
vpanic("ppIRExpr");
e->tag = Iex_VECRET;
return e;
}
-IRExpr* IRExpr_BBPTR ( void ) {
+IRExpr* IRExpr_GSPTR ( void ) {
IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
- e->tag = Iex_BBPTR;
+ e->tag = Iex_GSPTR;
return e;
}
case Iex_VECRET:
return IRExpr_VECRET();
- case Iex_BBPTR:
- return IRExpr_BBPTR();
+ case Iex_GSPTR:
+ return IRExpr_GSPTR();
case Iex_Binder:
return IRExpr_Binder(e->Iex.Binder.binder);
vpanic("typeOfIRExpr: Binder is not a valid expression");
case Iex_VECRET:
vpanic("typeOfIRExpr: VECRET is not a valid expression");
- case Iex_BBPTR:
- vpanic("typeOfIRExpr: BBPTR is not a valid expression");
+ case Iex_GSPTR:
+ vpanic("typeOfIRExpr: GSPTR is not a valid expression");
default:
ppIRExpr(e);
vpanic("typeOfIRExpr");
}
*/
-static inline Bool isIRAtom_or_VECRET_or_BBPTR ( const IRExpr* e )
+static inline Bool isIRAtom_or_VECRET_or_GSPTR ( const IRExpr* e )
{
if (isIRAtom(e)) {
return True;
}
- return UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e));
+ return UNLIKELY(is_IRExpr_VECRET_or_GSPTR(e));
}
Bool isFlatIRStmt ( const IRStmt* st )
if (!isIRAtom(di->guard))
return False;
for (i = 0; di->args[i]; i++)
- if (!isIRAtom_or_VECRET_or_BBPTR(di->args[i]))
+ if (!isIRAtom_or_VECRET_or_GSPTR(di->args[i]))
return False;
if (di->mAddr && !isIRAtom(di->mAddr))
return False;
case Iex_CCall:
for (i = 0; expr->Iex.CCall.args[i]; i++) {
const IRExpr* arg = expr->Iex.CCall.args[i];
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(arg))) {
/* These aren't allowed in CCall lists. Let's detect
and throw them out here, though, rather than
segfaulting a bit later on. */
d = stmt->Ist.Dirty.details;
for (i = 0; d->args[i] != NULL; i++) {
IRExpr* arg = d->args[i];
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg))) {
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(arg))) {
/* This is ensured by isFlatIRStmt */
;
} else {
if (i >= 32)
sanityCheckFail(bb,stmt,"Iex.CCall: > 32 args");
IRExpr* arg = expr->Iex.CCall.args[i];
- if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg)))
- sanityCheckFail(bb,stmt,"Iex.CCall.args: is VECRET/BBPTR");
+ if (UNLIKELY(is_IRExpr_VECRET_or_GSPTR(arg)))
+ sanityCheckFail(bb,stmt,"Iex.CCall.args: is VECRET/GSPTR");
tcExpr(bb,stmt, arg, gWordTy);
}
if (expr->Iex.CCall.retty == Ity_I1)
if (retTy == Ity_I1)
sanityCheckFail(bb,stmt,"IRStmt.Dirty.dst :: Ity_I1");
}
- UInt nVECRETs = 0, nBBPTRs = 0;
+ UInt nVECRETs = 0, nGSPTRs = 0;
for (i = 0; d->args[i] != NULL; i++) {
if (i >= 32)
sanityCheckFail(bb,stmt,"IRStmt.Dirty: > 32 args");
const IRExpr* arg = d->args[i];
if (UNLIKELY(arg->tag == Iex_VECRET)) {
nVECRETs++;
- } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
- nBBPTRs++;
+ } else if (UNLIKELY(arg->tag == Iex_GSPTR)) {
+ nGSPTRs++;
} else {
if (typeOfIRExpr(tyenv, arg) == Ity_I1)
sanityCheckFail(bb,stmt,"IRStmt.Dirty.arg[i] :: Ity_I1");
}
- if (nBBPTRs > 1) {
- sanityCheckFail(bb,stmt,"IRStmt.Dirty.args: > 1 BBPTR arg");
+ if (nGSPTRs > 1) {
+ sanityCheckFail(bb,stmt,"IRStmt.Dirty.args: > 1 GSPTR arg");
}
if (nVECRETs == 1) {
/* Fn must return V128 or V256. */
"IRStmt.Dirty.args: > 1 VECRET present");
}
}
- if (nBBPTRs > 1) {
+ if (nGSPTRs > 1) {
sanityCheckFail(bb,stmt,
- "IRStmt.Dirty.args: > 1 BBPTR present");
+ "IRStmt.Dirty.args: > 1 GSPTR present");
}
/* If you ask for the baseblock pointer, you have to make
some declaration about access to the guest state too. */
- if (d->nFxState == 0 && nBBPTRs != 0) {
+ if (d->nFxState == 0 && nGSPTRs != 0) {
sanityCheckFail(bb,stmt,
- "IRStmt.Dirty.args: BBPTR requested, "
+ "IRStmt.Dirty.args: GSPTR requested, "
"but no fxState declared");
}
break;
d2->guard = flatten_Expr(bb, d2->guard);
for (i = 0; d2->args[i]; i++) {
IRExpr* arg = d2->args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
d2->args[i] = flatten_Expr(bb, arg);
}
addStmtToIRSB(bb, IRStmt_Dirty(d2));
d2->guard = fold_Expr(env, subst_Expr(env, d2->guard));
for (i = 0; d2->args[i]; i++) {
IRExpr* arg = d2->args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg))) {
vassert(isIRAtom(arg));
d2->args[i] = fold_Expr(env, subst_Expr(env, arg));
}
addUses_Expr(set, d->guard);
for (i = 0; d->args[i] != NULL; i++) {
IRExpr* arg = d->args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
addUses_Expr(set, arg);
}
return;
deltaIRExpr(d->guard, delta);
for (i = 0; d->args[i]; i++) {
IRExpr* arg = d->args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
deltaIRExpr(arg, delta);
}
if (d->tmp != IRTemp_INVALID)
aoccCount_Expr(uses, d->guard);
for (i = 0; d->args[i]; i++) {
IRExpr* arg = d->args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
aoccCount_Expr(uses, arg);
}
return;
d2->guard = atbSubst_Expr(env, d2->guard);
for (i = 0; d2->args[i]; i++) {
IRExpr* arg = d2->args[i];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
d2->args[i] = atbSubst_Expr(env, arg);
}
return IRStmt_Dirty(d2);
guest state under the covers. It's not allowed, but let's be
extra conservative and assume the worst. */
for (i = 0; d->args[i]; i++) {
- if (UNLIKELY(d->args[i]->tag == Iex_BBPTR)) {
+ if (UNLIKELY(d->args[i]->tag == Iex_GSPTR)) {
*requiresPreciseMemExns = True;
/* Assume all guest state is written. */
interval.present = True;
vassert(isIRAtom(d->guard));
for (Int j = 0; d->args[j]; j++) {
IRExpr* arg = d->args[j];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg))) {
vassert(isIRAtom(arg));
}
}
vassert(isIRAtom(d->guard));
for (j = 0; d->args[j]; j++) {
IRExpr* arg = d->args[j];
- if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+ if (LIKELY(!is_IRExpr_VECRET_or_GSPTR(arg)))
vassert(isIRAtom(arg));
}
if (d->mFx != Ifx_None)
Iex_ITE,
Iex_CCall,
Iex_VECRET,
- Iex_BBPTR
+ Iex_GSPTR
}
IRExprTag;
quite poor code to be generated. Try to avoid it.
In principle it would be allowable to have the arg vector
- contain an IRExpr_VECRET(), although not IRExpr_BBPTR(). However,
+ contain an IRExpr_VECRET(), although not IRExpr_GSPTR(). However,
at the moment there is no requirement for clean helper calls to
be able to return V128 or V256 values. Hence this is not allowed.
only appear at most once in an argument list, and it may not appear
at all in argument lists for clean helper calls. */
-static inline Bool is_IRExpr_VECRET_or_BBPTR ( const IRExpr* e ) {
- return e->tag == Iex_VECRET || e->tag == Iex_BBPTR;
+static inline Bool is_IRExpr_VECRET_or_GSPTR ( const IRExpr* e ) {
+ return e->tag == Iex_VECRET || e->tag == Iex_GSPTR;
}
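(To illustrate the rule the surrounding comment states: the markers may appear at most once each in a dirty-call argument vector and not at all in a clean-call one. The checker below is a sketch of that rule only; the real enforcement lives in the sanity checks in ir_defs.c shown earlier.)

   static Bool argVecObeysMarkerRule ( IRExpr** args, Bool isDirtyCall )
   {
      UInt i, nVECRETs = 0, nGSPTRs = 0;
      for (i = 0; args[i]; i++) {
         if (args[i]->tag == Iex_VECRET)      nVECRETs++;
         else if (args[i]->tag == Iex_GSPTR)  nGSPTRs++;
      }
      if (!isDirtyCall)
         return nVECRETs == 0 && nGSPTRs == 0;   /* clean call: neither allowed */
      return nVECRETs <= 1 && nGSPTRs <= 1;      /* dirty call: at most one each */
   }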
extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
extern IRExpr* IRExpr_VECRET ( void );
-extern IRExpr* IRExpr_BBPTR ( void );
+extern IRExpr* IRExpr_GSPTR ( void );
/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( const IRExpr* );
number of times at a fixed interval, if required.
Normally, code is generated to pass just the args to the helper.
- However, if IRExpr_BBPTR() is present in the argument list (at most
- one instance is allowed), then the baseblock pointer is passed for
+ However, if IRExpr_GSPTR() is present in the argument list (at most
+ one instance is allowed), then the guest state pointer is passed for
that arg, so that the callee can access the guest state. It is
- invalid for .nFxState to be zero but IRExpr_BBPTR() to be present,
+ invalid for .nFxState to be zero but IRExpr_GSPTR() to be present,
since .nFxState==0 is a claim that the call does not access guest
state.
allowed. */
IRCallee* cee; /* where to call */
IRExpr* guard; /* :: Ity_Bit. Controls whether call happens */
- /* The args vector may contain IRExpr_BBPTR() and/or
+ /* The args vector may contain IRExpr_GSPTR() and/or
IRExpr_VECRET(), in both cases, at most once. */
IRExpr** args; /* arg vector, ends in NULL. */
IRTemp tmp; /* to assign result to, or IRTemp_INVALID if none */
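(When the helper's result is wanted, the _1_N constructor fills in .tmp, and .guard makes the call conditional; a hedged sketch combining those with a GSPTR argument follows. "my_query_helper", imm and cond are hypothetical, and newTemp/binop/mkU64/mkexpr are assumed to be the usual file-local IR-building helpers.)

   IRTemp   res = newTemp(Ity_I64);
   IRDirty* d   = unsafeIRDirty_1_N(
                     res, 0/*regparms*/,
                     "my_query_helper", &my_query_helper,
                     mkIRExprVec_2( IRExpr_GSPTR(), mkU64(imm) )
                  );
   d->guard = binop(Iop_CmpNE64, mkexpr(cond), mkU64(0));
   /* As with any GSPTR user, d->nFxState / d->fxState must still be
      filled in (see the earlier sketch). */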