Remove Ptr_to_ULong and ULong_to_Ptr. A cast to Addr replaces the former; the latter was unused.
git-svn-id: svn://svn.valgrind.org/vex/trunk@3061
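Every hunk below applies the same substitution: a value previously produced by Ptr_to_ULong(p) becomes a plain (Addr)p, which then widens implicitly wherever a ULong is expected. As a minimal self-contained sketch of why this is warning-free on either host word size (the typedefs are assumptions standing in for libvex_basictypes.h, and take_imm64 is a hypothetical stand-in for an emitter such as emit64):

   #include <stdio.h>

   typedef unsigned long      Addr;    /* assumed: host-word-sized */
   typedef unsigned long long ULong;

   /* hypothetical stand-in for an emitter taking a 64-bit immediate */
   static void take_imm64 ( ULong imm ) { printf("0x%llx\n", imm); }

   int main ( void )
   {
      int anchor;
      const void* p = &anchor;
      /* pointer -> Addr is an exact-width cast on 32- and 64-bit hosts
         alike; Addr -> ULong is then ordinary integer widening, so no
         helper function is needed to keep compilers quiet */
      take_imm64( (Addr)p );
      return 0;
   }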
const void* disp_cp_chain_me
= i->Ain.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
- p = emit64(p, Ptr_to_ULong(disp_cp_chain_me));
+ p = emit64(p, (Addr)disp_cp_chain_me);
/* call *%r11 */
*p++ = 0x41;
*p++ = 0xFF;
p = doAMode_M(p, i->Ain.XIndir.dstGA, i->Ain.XIndir.amRIP);
/* get $disp_cp_xindir into %r11 */
- if (fitsIn32Bits(Ptr_to_ULong(disp_cp_xindir))) {
+ if (fitsIn32Bits((Addr)disp_cp_xindir)) {
/* use a shorter encoding */
/* movl sign-extend(disp_cp_xindir), %r11 */
*p++ = 0x49;
*p++ = 0xC7;
*p++ = 0xC3;
- p = emit32(p, (UInt)Ptr_to_ULong(disp_cp_xindir));
+ p = emit32(p, (UInt)(Addr)disp_cp_xindir);
} else {
/* movabsq $disp_cp_xindir, %r11 */
*p++ = 0x49;
*p++ = 0xBB;
- p = emit64(p, Ptr_to_ULong(disp_cp_xindir));
+ p = emit64(p, (Addr)disp_cp_xindir);
}
/* jmp *%r11 */
/* movabsq $disp_assisted, %r11 */
*p++ = 0x49;
*p++ = 0xBB;
- p = emit64(p, Ptr_to_ULong(disp_cp_xassisted));
+ p = emit64(p, (Addr)disp_cp_xassisted);
/* jmp *%r11 */
*p++ = 0x41;
*p++ = 0xFF;
UChar* p = (UChar*)place_to_chain;
vassert(p[0] == 0x49);
vassert(p[1] == 0xBB);
- vassert(*(ULong*)(&p[2]) == Ptr_to_ULong(disp_cp_chain_me_EXPECTED));
+ vassert(*(Addr*)(&p[2]) == (Addr)disp_cp_chain_me_EXPECTED);
vassert(p[10] == 0x41);
vassert(p[11] == 0xFF);
vassert(p[12] == 0xD3);
vassert(delta == 0LL || delta == -1LL);
} else {
/* Minimal modifications from the starting sequence. */
- *(ULong*)(&p[2]) = Ptr_to_ULong(place_to_jump_to);
+ *(Addr*)(&p[2]) = (Addr)place_to_jump_to;
p[12] = 0xE3;
}
VexInvalRange vir = { (HWord)place_to_chain, 13 };
UChar* p = (UChar*)place_to_unchain;
Bool valid = False;
if (p[0] == 0x49 && p[1] == 0xBB
- && *(ULong*)(&p[2]) == Ptr_to_ULong(place_to_jump_to_EXPECTED)
+ && *(Addr*)(&p[2]) == (Addr)place_to_jump_to_EXPECTED
&& p[10] == 0x41 && p[11] == 0xFF && p[12] == 0xE3) {
/* it's the long form */
valid = True;
*/
p[0] = 0x49;
p[1] = 0xBB;
- *(ULong*)(&p[2]) = Ptr_to_ULong(disp_cp_chain_me);
+ *(Addr*)(&p[2]) = (Addr)disp_cp_chain_me;
p[10] = 0x41;
p[11] = 0xFF;
p[12] = 0xD3;
vassert(p[10] == 0x49);
vassert(p[11] == 0xFF);
vassert(p[12] == 0x03);
- ULong imm64 = (ULong)Ptr_to_ULong(location_of_counter);
+ ULong imm64 = (ULong)(Addr)location_of_counter;
p[2] = imm64 & 0xFF; imm64 >>= 8;
p[3] = imm64 & 0xFF; imm64 >>= 8;
p[4] = imm64 & 0xFF; imm64 >>= 8;
/* Finally, generate the call itself. This needs the *retloc value
set in the switch above, which is why it's at the end. */
addInstr(env,
- AMD64Instr_Call(cc, Ptr_to_ULong(cee->addr), n_args, *retloc));
+ AMD64Instr_Call(cc, (Addr)cee->addr, n_args, *retloc));
}
i->ARM64in.CSel.cond = cond;
return i;
}
-ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, HWord target, Int nArgRegs,
+ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, Addr64 target, Int nArgRegs,
RetLoc rloc ) {
ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
i->tag = ARM64in_Call;
vex_printf("call%s ",
i->ARM64in.Call.cond==ARM64cc_AL
? " " : showARM64CondCode(i->ARM64in.Call.cond));
- vex_printf("0x%lx [nArgRegs=%d, ",
+ vex_printf("0x%llx [nArgRegs=%d, ",
i->ARM64in.Call.target, i->ARM64in.Call.nArgRegs);
ppRetLoc(i->ARM64in.Call.rloc);
vex_printf("]");
= i->ARM64in.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = imm64_to_iregNo_EXACTLY4(p, /*x*/9,
- Ptr_to_ULong(disp_cp_chain_me));
+ (Addr)disp_cp_chain_me);
*p++ = 0xD63F0120;
/* --- END of PATCHABLE BYTES --- */
/* imm64 x9, VG_(disp_cp_xindir) */
/* br x9 */
- p = imm64_to_iregNo(p, /*x*/9, Ptr_to_ULong(disp_cp_xindir));
+ p = imm64_to_iregNo(p, /*x*/9, (Addr)disp_cp_xindir);
*p++ = 0xD61F0120; /* br x9 */
/* Fix up the conditional jump, if there was one. */
/* imm64 x9, VG_(disp_cp_xassisted) */
/* br x9 */
- p = imm64_to_iregNo(p, /*x*/9, Ptr_to_ULong(disp_cp_xassisted));
+ p = imm64_to_iregNo(p, /*x*/9, (Addr)disp_cp_xassisted);
*p++ = 0xD61F0120; /* br x9 */
/* Fix up the conditional jump, if there was one. */
UInt* p = (UInt*)place_to_chain;
vassert(0 == (3 & (HWord)p));
vassert(is_imm64_to_iregNo_EXACTLY4(
- p, /*x*/9, Ptr_to_ULong(disp_cp_chain_me_EXPECTED)));
+ p, /*x*/9, (Addr)disp_cp_chain_me_EXPECTED));
vassert(p[4] == 0xD63F0120);
/* And what we want to change it to is:
The replacement has the same length as the original.
*/
(void)imm64_to_iregNo_EXACTLY4(
- p, /*x*/9, Ptr_to_ULong(place_to_jump_to));
+ p, /*x*/9, (Addr)place_to_jump_to);
p[4] = 0xD61F0120;
VexInvalRange vir = {(HWord)p, 20};
UInt* p = (UInt*)place_to_unchain;
vassert(0 == (3 & (HWord)p));
vassert(is_imm64_to_iregNo_EXACTLY4(
- p, /*x*/9, Ptr_to_ULong(place_to_jump_to_EXPECTED)));
+ p, /*x*/9, (Addr)place_to_jump_to_EXPECTED));
vassert(p[4] == 0xD61F0120);
/* And what we want to change it to is:
D6 3F 01 20
*/
(void)imm64_to_iregNo_EXACTLY4(
- p, /*x*/9, Ptr_to_ULong(disp_cp_chain_me));
+ p, /*x*/9, (Addr)disp_cp_chain_me);
p[4] = 0xD63F0120;
VexInvalRange vir = {(HWord)p, 20};
vassert(p[5] == 0x91000508);
vassert(p[6] == 0xF9000128);
imm64_to_iregNo_EXACTLY4(p, /*x*/9,
- Ptr_to_ULong(location_of_counter));
+ (Addr)location_of_counter);
VexInvalRange vir = {(HWord)p, 4*4};
return vir;
}
condition (which could be ARM64cc_AL). */
struct {
RetLoc rloc; /* where the return value will be */
- HWord target;
+ Addr64 target;
ARM64CondCode cond;
Int nArgRegs; /* # regs carrying args: 0 .. 8 */
} Call;
ARM64CondCode cond, IRJumpKind jk );
extern ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
ARM64CondCode cond );
-extern ARM64Instr* ARM64Instr_Call ( ARM64CondCode, HWord, Int nArgRegs,
+extern ARM64Instr* ARM64Instr_Call ( ARM64CondCode, Addr64, Int nArgRegs,
RetLoc rloc );
extern ARM64Instr* ARM64Instr_AddToSP ( Int simm );
extern ARM64Instr* ARM64Instr_FromSP ( HReg dst );
HReg tmpregs[ARM64_N_ARGREGS];
Bool go_fast;
Int n_args, i, nextArgReg;
- ULong target;
+ Addr64 target;
vassert(ARM64_N_ARGREGS == 8);
number into the call (we'll need to know it when doing register
allocation, to know what regs the call reads.) */
- target = (HWord)Ptr_to_ULong(cee->addr);
+ target = (Addr)cee->addr;
addInstr(env, ARM64Instr_Call( cc, target, nextArgReg, *retloc ));
return True; /* success */
HReg res = newVRegI(env);
addInstr(env, ARM64Instr_MovI(hregARM64_X0(), regL));
addInstr(env, ARM64Instr_MovI(hregARM64_X1(), regR));
- addInstr(env, ARM64Instr_Call( ARM64cc_AL, (HWord)Ptr_to_ULong(fn),
+ addInstr(env, ARM64Instr_Call( ARM64cc_AL, (Addr)fn,
2, mk_RetLoc_simple(RLPri_Int) ));
addInstr(env, ARM64Instr_MovI(res, hregARM64_X0()));
return res;
vassert(cond != ARMcc_AL);
return i;
}
-ARMInstr* ARMInstr_Call ( ARMCondCode cond, HWord target, Int nArgRegs,
+ARMInstr* ARMInstr_Call ( ARMCondCode cond, Addr32 target, Int nArgRegs,
RetLoc rloc ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Call;
vex_printf("call%s ",
i->ARMin.Call.cond==ARMcc_AL
? "" : showARMCondCode(i->ARMin.Call.cond));
- vex_printf("0x%lx [nArgRegs=%d, ",
+ vex_printf("0x%x [nArgRegs=%d, ",
i->ARMin.Call.target, i->ARMin.Call.nArgRegs);
ppRetLoc(i->ARMin.Call.rloc);
vex_printf("]");
= i->ARMin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = imm32_to_iregNo_EXACTLY2(p, /*r*/12,
- (UInt)Ptr_to_ULong(disp_cp_chain_me));
+ (UInt)(Addr)disp_cp_chain_me);
*p++ = 0xE12FFF3C;
/* --- END of PATCHABLE BYTES --- */
/* movt r12, hi16(VG_(disp_cp_xindir)) */
/* bx r12 (A1) */
p = imm32_to_iregNo(p, /*r*/12,
- (UInt)Ptr_to_ULong(disp_cp_xindir));
+ (UInt)(Addr)disp_cp_xindir);
*p++ = 0xE12FFF1C;
/* Fix up the conditional jump, if there was one. */
/* movt r12, hi16(VG_(disp_cp_xassisted)) */
/* bx r12 (A1) */
p = imm32_to_iregNo(p, /*r*/12,
- (UInt)Ptr_to_ULong(disp_cp_xassisted));
+ (UInt)(Addr)disp_cp_xassisted);
*p++ = 0xE12FFF1C;
/* Fix up the conditional jump, if there was one. */
UInt* p = (UInt*)place_to_chain;
vassert(0 == (3 & (HWord)p));
vassert(is_imm32_to_iregNo_EXACTLY2(
- p, /*r*/12, (UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED)));
+ p, /*r*/12, (UInt)(Addr)disp_cp_chain_me_EXPECTED));
vassert(p[2] == 0xE12FFF3C);
/* And what we want to change it to is either:
(general case)
p[2] = 0xFF000000;
} else {
(void)imm32_to_iregNo_EXACTLY2(
- p, /*r*/12, (UInt)Ptr_to_ULong(place_to_jump_to));
+ p, /*r*/12, (UInt)(Addr)place_to_jump_to);
p[2] = 0xE12FFF1C;
}
Bool valid = False;
if (is_imm32_to_iregNo_EXACTLY2(
- p, /*r*/12, (UInt)Ptr_to_ULong(place_to_jump_to_EXPECTED))
+ p, /*r*/12, (UInt)(Addr)place_to_jump_to_EXPECTED)
&& p[2] == 0xE12FFF1C) {
valid = True; /* it's the long form */
if (0)
E1 2F FF 3C
*/
(void)imm32_to_iregNo_EXACTLY2(
- p, /*r*/12, (UInt)Ptr_to_ULong(disp_cp_chain_me));
+ p, /*r*/12, (UInt)(Addr)disp_cp_chain_me);
p[2] = 0xE12FFF3C;
VexInvalRange vir = {(HWord)p, 12};
return vir;
vassert(p[6] == 0xE2ABB000);
vassert(p[7] == 0xE58CB004);
imm32_to_iregNo_EXACTLY2(p, /*r*/12,
- (UInt)Ptr_to_ULong(location_of_counter));
+ (UInt)(Addr)location_of_counter);
VexInvalRange vir = {(HWord)p, 8};
return vir;
}
condition (which could be ARMcc_AL). */
struct {
ARMCondCode cond;
- HWord target;
+ Addr32 target;
Int nArgRegs; /* # regs carrying args: 0 .. 4 */
RetLoc rloc; /* where the return value will be */
} Call;
extern ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond, IRJumpKind jk );
extern ARMInstr* ARMInstr_CMov ( ARMCondCode, HReg dst, ARMRI84* src );
-extern ARMInstr* ARMInstr_Call ( ARMCondCode, HWord, Int nArgRegs,
+extern ARMInstr* ARMInstr_Call ( ARMCondCode, Addr32, Int nArgRegs,
RetLoc rloc );
extern ARMInstr* ARMInstr_Mul ( ARMMulOp op );
extern ARMInstr* ARMInstr_LdrEX ( Int szB );
HReg tmpregs[ARM_N_ARGREGS];
Bool go_fast;
Int n_args, i, nextArgReg;
- ULong target;
+ Addr32 target;
vassert(ARM_N_ARGREGS == 4);
instruction, a bitmask indicating which of r0/1/2/3 carry live
values. But that's too much hassle. */
- target = (HWord)Ptr_to_ULong(cee->addr);
+ target = (Addr)cee->addr;
addInstr(env, ARMInstr_Call( cc, target, nextArgReg, *retloc ));
return True; /* success */
HReg res = newVRegI(env);
addInstr(env, mk_iMOVds_RR(hregARM_R0(), regL));
addInstr(env, mk_iMOVds_RR(hregARM_R1(), regR));
- addInstr(env, ARMInstr_Call( ARMcc_AL, (HWord)Ptr_to_ULong(fn),
+ addInstr(env, ARMInstr_Call( ARMcc_AL, (Addr)fn,
2, mk_RetLoc_simple(RLPri_Int) ));
addInstr(env, mk_iMOVds_RR(res, hregARM_R0()));
return res;
HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
HReg res = newVRegI(env);
addInstr(env, mk_iMOVds_RR(hregARM_R0(), arg));
- addInstr(env, ARMInstr_Call( ARMcc_AL, (HWord)Ptr_to_ULong(fn),
+ addInstr(env, ARMInstr_Call( ARMcc_AL, (Addr)fn,
1, mk_RetLoc_simple(RLPri_Int) ));
addInstr(env, mk_iMOVds_RR(res, hregARM_R0()));
return res;
= i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
- Ptr_to_ULong(disp_cp_chain_me), mode64);
+ (Addr)disp_cp_chain_me, mode64);
/* jalr $9 */
/* nop */
p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
/* jalr r9 */
/* nop */
p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
- Ptr_to_ULong(disp_cp_xindir), mode64);
+ (Addr)disp_cp_xindir, mode64);
p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
/* move r9, VG_(disp_cp_xassisted) */
p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
- (ULong)Ptr_to_ULong(disp_cp_xassisted), mode64);
+ (ULong)(Addr)disp_cp_xassisted, mode64);
/* jalr $9
nop */
p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
UChar* p = (UChar*)place_to_chain;
vassert(0 == (3 & (HWord)p));
vassert(isLoadImm_EXACTLY2or6(p, /*r*/9,
- (UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED),
+ (UInt)(Addr)disp_cp_chain_me_EXPECTED,
mode64));
vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
*/
p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
- Ptr_to_ULong(place_to_jump_to), mode64);
+ (Addr)place_to_jump_to, mode64);
p = emit32(p, 0x120F809);
p = emit32(p, 0x00000000);
UChar* p = (UChar*)place_to_unchain;
vassert(0 == (3 & (HWord)p));
vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9,
- Ptr_to_ULong(place_to_jump_to_EXPECTED),
+ (Addr)place_to_jump_to_EXPECTED,
mode64));
vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
The replacement has the same length as the original.
*/
p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
- Ptr_to_ULong(disp_cp_chain_me), mode64);
+ (Addr)disp_cp_chain_me, mode64);
p = emit32(p, 0x120F809);
p = emit32(p, 0x00000000);
}
p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
- Ptr_to_ULong(location_of_counter), mode64);
+ (Addr)location_of_counter, mode64);
VexInvalRange vir = {(HWord)p, 8};
return vir;
vassert(0);
}
- ULong target = mode64 ? Ptr_to_ULong(cee->addr) :
- toUInt(Ptr_to_ULong(cee->addr));
+ Addr64 target = mode64 ? (Addr)cee->addr :
+ toUInt((Addr)cee->addr);
/* Finally, generate the call itself. This needs the *retloc value
set in the switch above, which is why it's at the end. */
if (cc == MIPScc_AL)
- addInstr(env, MIPSInstr_CallAlways(cc, (Addr64)target, argiregs,
+ addInstr(env, MIPSInstr_CallAlways(cc, target, argiregs,
*retloc));
else
- addInstr(env, MIPSInstr_Call(cc, (Addr64)target, argiregs, src, *retloc));
+ addInstr(env, MIPSInstr_Call(cc, target, argiregs, src, *retloc));
}
/*---------------------------------------------------------*/
argiregs |= (1 << 4);
argiregs |= (1 << 5);
addInstr(env, MIPSInstr_CallAlways( MIPScc_AL,
- (HWord)Ptr_to_ULong(fn),
+ (Addr)fn,
argiregs, rloc));
addInstr(env, mk_iMOVds_RR(res, hregMIPS_GPR2(env->mode64)));
return res;
addInstr(env, mk_iMOVds_RR(hregMIPS_GPR4(env->mode64), regL));
argiregs |= (1 << 4);
addInstr(env, MIPSInstr_CallAlways( MIPScc_AL,
- (HWord)Ptr_to_ULong(fn),
+ (Addr)fn,
argiregs, rloc));
addInstr(env, mk_iMOVds_RR(res, hregMIPS_GPR2(env->mode64)));
return res;
= i->Pin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = mkLoadImm_EXACTLY2or5(
- p, /*r*/30, Ptr_to_ULong(disp_cp_chain_me), mode64, endness_host);
+ p, /*r*/30, (Addr)disp_cp_chain_me, mode64, endness_host);
/* mtctr r30 */
p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
/* bctrl */
);
/* imm32/64 r30, VG_(disp_cp_xindir) */
- p = mkLoadImm(p, /*r*/30, (ULong)Ptr_to_ULong(disp_cp_xindir), mode64,
+ p = mkLoadImm(p, /*r*/30, (ULong)(Addr)disp_cp_xindir, mode64,
endness_host);
/* mtctr r30 */
p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
/* imm32/64 r30, VG_(disp_cp_xassisted) */
p = mkLoadImm(p, /*r*/30,
- (ULong)Ptr_to_ULong(disp_cp_xassisted), mode64,
+ (ULong)(Addr)disp_cp_xassisted, mode64,
endness_host);
/* mtctr r30 */
p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
UChar* p = (UChar*)place_to_chain;
vassert(0 == (3 & (HWord)p));
vassert(isLoadImm_EXACTLY2or5(p, /*r*/30,
- Ptr_to_ULong(disp_cp_chain_me_EXPECTED),
+ (Addr)disp_cp_chain_me_EXPECTED,
mode64, endness_host));
vassert(fetch32(p + (mode64 ? 20 : 8) + 0, endness_host) == 0x7FC903A6);
vassert(fetch32(p + (mode64 ? 20 : 8) + 4, endness_host) == 0x4E800421);
The replacement has the same length as the original.
*/
p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
- Ptr_to_ULong(place_to_jump_to), mode64,
+ (Addr)place_to_jump_to, mode64,
endness_host);
p = emit32(p, 0x7FC903A6, endness_host);
p = emit32(p, 0x4E800420, endness_host);
UChar* p = (UChar*)place_to_unchain;
vassert(0 == (3 & (HWord)p));
vassert(isLoadImm_EXACTLY2or5(p, /*r*/30,
- Ptr_to_ULong(place_to_jump_to_EXPECTED),
+ (Addr)place_to_jump_to_EXPECTED,
mode64, endness_host));
vassert(fetch32(p + (mode64 ? 20 : 8) + 0, endness_host) == 0x7FC903A6);
vassert(fetch32(p + (mode64 ? 20 : 8) + 4, endness_host) == 0x4E800420);
The replacement has the same length as the original.
*/
p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
- Ptr_to_ULong(disp_cp_chain_me), mode64,
+ (Addr)disp_cp_chain_me, mode64,
endness_host);
p = emit32(p, 0x7FC903A6, endness_host);
p = emit32(p, 0x4E800421, endness_host);
vassert(fetch32(p + 24, endness_host) == 0x3BBD0001);
vassert(fetch32(p + 28, endness_host) == 0xFBBE0000);
p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
- Ptr_to_ULong(location_of_counter),
+ (Addr)location_of_counter,
True/*mode64*/, endness_host);
len = p - (UChar*)place_to_patch;
vassert(len == 20);
vassert(fetch32(p + 24, endness_host) == 0x7FBD0194);
vassert(fetch32(p + 28, endness_host) == 0x93BE0000);
p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
- Ptr_to_ULong(location_of_counter),
+ (Addr)location_of_counter,
False/*!mode64*/, endness_host);
len = p - (UChar*)place_to_patch;
vassert(len == 8);
/* Finally, generate the call itself. This needs the *retloc value
set in the switch above, which is why it's at the end. */
- ULong target = mode64 ? Ptr_to_ULong(cee->addr)
- : toUInt(Ptr_to_ULong(cee->addr));
- addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs, *retloc ));
+ Addr64 target = mode64 ? (Addr)cee->addr
+ : toUInt((Addr)(cee->addr));
+ addInstr(env, PPCInstr_Call( cc, target, argiregs, *retloc ));
}
cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
if (IEndianess == Iend_LE) {
- addInstr(env, PPCInstr_Call( cc, Ptr_to_ULong(h_calc_BCDtoDPB),
+ addInstr(env, PPCInstr_Call( cc, (Addr)h_calc_BCDtoDPB,
argiregs,
mk_RetLoc_simple(RLPri_Int)) );
} else {
cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
if (IEndianess == Iend_LE) {
- addInstr(env, PPCInstr_Call( cc, Ptr_to_ULong(h_calc_DPBtoBCD),
+ addInstr(env, PPCInstr_Call( cc, (Addr)h_calc_DPBtoBCD,
argiregs,
mk_RetLoc_simple(RLPri_Int) ) );
} else {
cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
if (IEndianess == Iend_LE) {
- addInstr( env, PPCInstr_Call( cc, Ptr_to_ULong(h_calc_BCDtoDPB),
+ addInstr( env, PPCInstr_Call( cc, (Addr)h_calc_BCDtoDPB,
argiregs,
mk_RetLoc_simple(RLPri_2Int) ) );
} else {
- ULong target;
- target = mode64 ? Ptr_to_ULong(h_calc_BCDtoDPB) :
- toUInt( Ptr_to_ULong(h_calc_BCDtoDPB ) );
- addInstr( env, PPCInstr_Call( cc, (Addr64)target,
+ Addr64 target;
+ target = mode64 ? (Addr)h_calc_BCDtoDPB :
+ toUInt( (Addr)h_calc_BCDtoDPB );
+ addInstr( env, PPCInstr_Call( cc, target,
argiregs,
mk_RetLoc_simple(RLPri_2Int) ) );
}
cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
if (IEndianess == Iend_LE) {
- addInstr(env, PPCInstr_Call( cc, Ptr_to_ULong(h_calc_DPBtoBCD),
+ addInstr(env, PPCInstr_Call( cc, (Addr)h_calc_DPBtoBCD,
argiregs,
mk_RetLoc_simple(RLPri_2Int) ) );
} else {
- ULong target;
- target = mode64 ? Ptr_to_ULong(h_calc_DPBtoBCD) :
- toUInt( Ptr_to_ULong( h_calc_DPBtoBCD ) );
- addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs,
+ Addr64 target;
+ target = mode64 ? (Addr)h_calc_DPBtoBCD :
+ toUInt( (Addr)h_calc_DPBtoBCD );
+ addInstr(env, PPCInstr_Call( cc, target, argiregs,
mk_RetLoc_simple(RLPri_2Int) ) );
}
buf = s390_emit_BASR(buf, 1, R0);
/* --- FIRST PATCHABLE BYTE follows (must not modify %r1) --- */
- ULong addr = Ptr_to_ULong(disp_cp_chain_me);
+ Addr64 addr = (Addr)disp_cp_chain_me;
buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH, addr);
/* goto *tchain_scratch */
/* load tchain_scratch, #disp_indir */
buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH,
- Ptr_to_ULong(disp_cp_xindir));
+ (Addr)disp_cp_xindir);
/* goto *tchain_scratch */
buf = s390_emit_BCR(buf, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);
/* load tchain_scratch, #disp_assisted */
buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH,
- Ptr_to_ULong(disp_cp_xassisted));
+ (Addr)disp_cp_xassisted);
/* goto *tchain_scratch */
buf = s390_emit_BCR(buf, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);
s390_tchain_verify_load64(code_to_patch, S390_REGNO_TCHAIN_SCRATCH, 0);
UChar *p = s390_tchain_patch_load64(code_to_patch,
- Ptr_to_ULong(location_of_counter));
+ (Addr)location_of_counter);
UInt len = p - (UChar *)code_to_patch;
VexInvalRange vir = { (HWord)code_to_patch, len };
*/
const UChar *next;
next = s390_tchain_verify_load64(place_to_chain, S390_REGNO_TCHAIN_SCRATCH,
- Ptr_to_ULong(disp_cp_chain_me_EXPECTED));
+ (Addr)disp_cp_chain_me_EXPECTED);
vassert(s390_insn_is_BR(next, S390_REGNO_TCHAIN_SCRATCH));
/* And what we want to change it to is either:
load tchain_scratch, #place_to_jump_to
goto *tchain_scratch
*/
- ULong addr = Ptr_to_ULong(place_to_jump_to);
+ Addr64 addr = (Addr)place_to_jump_to;
p = s390_tchain_load64(p, S390_REGNO_TCHAIN_SCRATCH, addr);
/* There is no need to emit a BCR here, as it is already there. */
}
const UChar *next;
next = s390_tchain_verify_load64(p, S390_REGNO_TCHAIN_SCRATCH,
- Ptr_to_ULong(place_to_jump_to_EXPECTED));
+ (Addr)place_to_jump_to_EXPECTED);
/* Check for BR *tchain_scratch */
vassert(s390_insn_is_BR(next, S390_REGNO_TCHAIN_SCRATCH));
}
address (see s390_insn_xdirect_emit). */
p = s390_emit_BASR(p - S390_BASR_LEN, 1, R0);
- ULong addr = Ptr_to_ULong(disp_cp_chain_me);
+ Addr64 addr = (Addr)disp_cp_chain_me;
p = s390_tchain_load64(p, S390_REGNO_TCHAIN_SCRATCH, addr);
/* Emit the BCR in case the short form was used. In case of the long
IRCallee *callee, IRType retTy, IRExpr **args)
{
UInt n_args, i, argreg, size;
- ULong target;
+ Addr64 target;
HReg tmpregs[S390_NUM_GPRPARMS];
s390_cc_t cc;
addInstr(env, s390_insn_move(size, finalreg, tmpregs[i]));
}
- target = Ptr_to_ULong(callee->addr);
+ target = (Addr)callee->addr;
/* Do final checks, set the return values, and generate the call
instruction proper. */
}
/* Finally, the call itself. */
- addInstr(env, s390_insn_helper_call(cc, (Addr64)target, n_args,
+ addInstr(env, s390_insn_helper_call(cc, target, n_args,
callee->name, *retloc));
}
const void* disp_cp_chain_me
= i->Xin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
- p = emit32(p, (UInt)Ptr_to_ULong(disp_cp_chain_me));
+ p = emit32(p, (UInt)(Addr)disp_cp_chain_me);
/* call *%edx */
*p++ = 0xFF;
*p++ = 0xD2;
/* movl $disp_indir, %edx */
*p++ = 0xBA;
- p = emit32(p, (UInt)Ptr_to_ULong(disp_cp_xindir));
+ p = emit32(p, (UInt)(Addr)disp_cp_xindir);
/* jmp *%edx */
*p++ = 0xFF;
*p++ = 0xE2;
/* movl $disp_assisted, %edx */
*p++ = 0xBA;
- p = emit32(p, (UInt)Ptr_to_ULong(disp_cp_xassisted));
+ p = emit32(p, (UInt)(Addr)disp_cp_xassisted);
/* jmp *%edx */
*p++ = 0xFF;
*p++ = 0xE2;
*/
UChar* p = (UChar*)place_to_chain;
vassert(p[0] == 0xBA);
- vassert(*(UInt*)(&p[1]) == (UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED));
+ vassert(*(UInt*)(&p[1]) == (UInt)(Addr)disp_cp_chain_me_EXPECTED);
vassert(p[5] == 0xFF);
vassert(p[6] == 0xD2);
/* And what we want to change it to is:
So it's the same length (convenient, huh).
*/
p[0] = 0xBA;
- *(UInt*)(&p[1]) = (UInt)Ptr_to_ULong(disp_cp_chain_me);
+ *(UInt*)(&p[1]) = (UInt)(Addr)disp_cp_chain_me;
p[5] = 0xFF;
p[6] = 0xD2;
VexInvalRange vir = { (HWord)place_to_unchain, 7 };
vassert(p[11] == 0x00);
vassert(p[12] == 0x00);
vassert(p[13] == 0x00);
- UInt imm32 = (UInt)Ptr_to_ULong(location_of_counter);
+ UInt imm32 = (UInt)(Addr)location_of_counter;
p[2] = imm32 & 0xFF; imm32 >>= 8;
p[3] = imm32 & 0xFF; imm32 >>= 8;
p[4] = imm32 & 0xFF; imm32 >>= 8;
p[5] = imm32 & 0xFF; imm32 >>= 8;
- imm32 = 4 + (UInt)Ptr_to_ULong(location_of_counter);
+ imm32 = 4 + (UInt)(Addr)location_of_counter;
p[9] = imm32 & 0xFF; imm32 >>= 8;
p[10] = imm32 & 0xFF; imm32 >>= 8;
p[11] = imm32 & 0xFF; imm32 >>= 8;
parameters. */
vassert(sizeof(void*) == 4);
- addInstr(env, X86Instr_Call( cc, toUInt(Ptr_to_ULong(cee->addr)),
+ addInstr(env, X86Instr_Call( cc, (Addr)cee->addr,
cee->regparms, rloc));
if (n_arg_ws > 0)
add_to_esp(env, 4*n_arg_ws);
*/
HReg xLo, xHi;
HReg dst = newVRegI(env);
- HWord fn = (HWord)h_generic_calc_GetMSBs8x8;
+ Addr fn = (Addr)h_generic_calc_GetMSBs8x8;
iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg);
addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
- addInstr(env, X86Instr_Call( Xcc_ALWAYS, (UInt)fn,
+ addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
0, mk_RetLoc_simple(RLPri_Int) ));
add_to_esp(env, 2*4);
addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), dst));
iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
- addInstr(env, X86Instr_Call( Xcc_ALWAYS, (UInt)fn,
+ addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
0, mk_RetLoc_simple(RLPri_2Int) ));
add_to_esp(env, 4*4);
addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
- addInstr(env, X86Instr_Call( Xcc_ALWAYS, (UInt)fn,
+ addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
0, mk_RetLoc_simple(RLPri_2Int) ));
add_to_esp(env, 3*4);
addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg);
addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
- addInstr(env, X86Instr_Call( Xcc_ALWAYS, (UInt)fn,
+ addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
0, mk_RetLoc_simple(RLPri_2Int) ));
add_to_esp(env, 2*4);
addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
case 'p':
case 'P': {
Bool hexcaps = toBool(*format == 'P');
- ULong l = Ptr_to_ULong( va_arg(ap, void*) );
+ ULong l = (Addr)va_arg(ap, void*);
convert_int(intbuf, l, 16/*base*/, False/*unsigned*/, hexcaps);
len1 = len3 = 0;
len2 = vex_strlen(intbuf)+2;
machine. */
typedef unsigned long HWord;
-
-/* We need to know the host word size in order to write Ptr_to_ULong
- and ULong_to_Ptr in a way that doesn't cause compilers to complain.
- These functions allow us to cast pointers to and from 64-bit
- integers without complaints from compilers, regardless of the host
- word size.
-
- Also set up VEX_REGPARM.
-*/
-
+/* Set up VEX_HOST_WORDSIZE and VEX_REGPARM. */
#undef VEX_HOST_WORDSIZE
#undef VEX_REGPARM
#endif
-#if VEX_HOST_WORDSIZE == 8
- static inline ULong Ptr_to_ULong ( const void* p ) {
- return (ULong)p;
- }
- static inline void* ULong_to_Ptr ( ULong n ) {
- return (void*)n;
- }
-#elif VEX_HOST_WORDSIZE == 4
- static inline ULong Ptr_to_ULong ( const void* p ) {
- UInt w = (UInt)p;
- return (ULong)w;
- }
- static inline void* ULong_to_Ptr ( ULong n ) {
- UInt w = (UInt)n;
- return (void*)w;
- }
-#else
-# error "Vex: Fatal: Can't define Ptr_to_ULong / ULong_to_Ptr"
-#endif
-
-
#endif /* ndef __LIBVEX_BASICTYPES_H */
/*---------------------------------------------------------------*/
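For contrast, the 32-bit branch of the helper just deleted above shows exactly what the new cast replaces: going through a word-sized integer first avoids the size-changing pointer conversion that a direct (ULong)p would provoke a warning about on a 32-bit host. A hedged sketch of the equivalence (typedefs assumed as before; both function names are illustrative, not from the patch):

   typedef unsigned int       UInt;
   typedef unsigned long long ULong;
   typedef unsigned long      Addr;   /* assumed: host-word-sized */

   /* what the removed helper did on a 32-bit host */
   static inline ULong old_style ( const void* p )
   {
      UInt w = (UInt)p;        /* exact-width cast, no warning */
      return (ULong)w;         /* ordinary widening */
   }

   /* the equivalent spelling used throughout the patch */
   static inline ULong new_style ( const void* p )
   {
      return (ULong)(Addr)p;   /* e.g. (ULong)(Addr)disp_cp_xassisted */
   }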
# else
ad = calloc(1, stab[j].st_size);
# endif
- // assert( Ptr_to_ULong(ad) < 0xF0000000ULL );
+ // assert( (Addr)ad < 0xF0000000ULL );
if (0)
fprintf(stderr, "COMMON symbol, size %lld name %s allocd %p\n",