Constify pointer-typed members of VexTranslateArgs.
Fix the resulting ripple effects throughout the guest decoders and host emitters.
git-svn-id: svn://svn.valgrind.org/vex/trunk@2959
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
/* Pointer to the guest code area (points to start of BB, not to the
insn being processed). */
-static UChar* guest_code;
+static const UChar* guest_code;
/* The guest address corresponding to guest_code[0]. */
static Addr64 guest_RIP_bbstart;
Same for BTS, BTR
*/
-static Bool can_be_used_with_LOCK_prefix ( UChar* opc )
+static Bool can_be_used_with_LOCK_prefix ( const UChar* opc )
{
switch (opc[0]) {
case 0x00: case 0x01: case 0x08: case 0x09:
/* Spot "Special" instructions (see comment at top of file). */
{
- UChar* code = (UChar*)(guest_code + delta);
+ const UChar* code = guest_code + delta;
/* Spot the 16-byte preamble:
48C1C703 rolq $3, %rdi
48C1C70D rolq $13, %rdi
leading escapes. Check that any LOCK prefix is actually
allowed. */
if (haveLOCK(pfx)) {
- if (can_be_used_with_LOCK_prefix( (UChar*)&guest_code[delta] )) {
+ if (can_be_used_with_LOCK_prefix( &guest_code[delta] )) {
DIP("lock ");
} else {
*expect_CAS = False;
SSE2 as a minimum so there is no point distinguishing SSE1 vs
SSE2. */
- insn = (UChar*)&guest_code[delta];
+ insn = &guest_code[delta];
/* FXSAVE is spuriously at the start here only because it is
thusly placed in guest-x86/toIR.c. */
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code_IN,
+ const UChar* guest_code_IN,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
/* Do a little-endian load of a 32-bit word, regardless of the
endianness of the underlying host. */
-static inline UInt getUIntLittleEndianly ( UChar* p )
+static inline UInt getUIntLittleEndianly ( const UChar* p )
{
UInt w = 0;
w = (w << 8) | p[3];
Bool (*resteerOkFn) ( /*opaque*/void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_instr,
+ const UChar* guest_instr,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo
)
/* Spot "Special" instructions (see comment at top of file). */
{
- UChar* code = (UChar*)guest_instr;
+ const UChar* code = guest_instr;
/* Spot the 16-byte preamble:
93CC0D8C ror x12, x12, #3
93CC358C ror x12, x12, #13
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code_IN,
+ const UChar* guest_code_IN,
Long delta_IN,
Addr64 guest_IP,
VexArch guest_arch,
/* Try to decode */
Bool ok = disInstr_ARM64_WRK( &dres,
resteerOkFn, resteerCisOk, callback_opaque,
- (UChar*)&guest_code_IN[delta_IN],
+ &guest_code_IN[delta_IN],
archinfo, abiinfo );
if (ok) {
/* All decode successes end up here. */
Int i, j;
UChar buf[64];
UInt insn
- = getUIntLittleEndianly( (UChar*)&guest_code_IN[delta_IN] );
+ = getUIntLittleEndianly( &guest_code_IN[delta_IN] );
vex_bzero(buf, sizeof(buf));
for (i = j = 0; i < 32; i++) {
if (i > 0) {
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
/* Do a little-endian load of a 32-bit word, regardless of the
endianness of the underlying host. */
-static inline UInt getUIntLittleEndianly ( UChar* p )
+static inline UInt getUIntLittleEndianly ( const UChar* p )
{
UInt w = 0;
w = (w << 8) | p[3];
/* Do a little-endian load of a 16-bit word, regardless of the
endianness of the underlying host. */
-static inline UShort getUShortLittleEndianly ( UChar* p )
+static inline UShort getUShortLittleEndianly ( const UChar* p )
{
UShort w = 0;
w = (w << 8) | p[1];
Bool (*resteerOkFn) ( /*opaque*/void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_instr,
+ const UChar* guest_instr,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
Bool sigill_diag
/* Spot "Special" instructions (see comment at top of file). */
{
- UChar* code = (UChar*)guest_instr;
+ const UChar* code = guest_instr;
/* Spot the 16-byte preamble:
e1a0c1ec mov r12, r12, ROR #3
Bool (*resteerOkFn) ( /*opaque*/void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_instr,
+ const UChar* guest_instr,
VexArchInfo* archinfo,
VexAbiInfo* abiinfo,
Bool sigill_diag
/* ----------------------------------------------------------- */
/* Spot "Special" instructions (see comment at top of file). */
{
- UChar* code = (UChar*)guest_instr;
+ const UChar* code = guest_instr;
/* Spot the 16-byte preamble:
ea4f 0cfc mov.w ip, ip, ror #3
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code_IN,
+ const UChar* guest_code_IN,
Long delta_ENCODED,
Addr64 guest_IP_ENCODED,
VexArch guest_arch,
/*OUT*/UInt* n_guest_instrs, /* stats only */
/*IN*/ void* callback_opaque,
/*IN*/ DisOneInstrFn dis_instr_fn,
- /*IN*/ UChar* guest_code,
+ /*IN*/ const UChar* guest_code,
/*IN*/ Addr64 guest_IP_bbstart,
/*IN*/ Bool (*chase_into_ok)(void*,Addr64),
/*IN*/ VexEndness host_endness,
/*IN*/ void* callback_opaque,
/* Where is the guest code? */
- /*IN*/ UChar* guest_code,
+ /*IN*/ const UChar* guest_code,
/* Where is the actual insn? Note: it's at &guest_code[delta] */
/*IN*/ Long delta,
/*OUT*/UInt* n_guest_instrs, /* stats only */
/*IN*/ void* callback_opaque,
/*IN*/ DisOneInstrFn dis_instr_fn,
- /*IN*/ UChar* guest_code,
+ /*IN*/ const UChar* guest_code,
/*IN*/ Addr64 guest_IP_bbstart,
/*IN*/ Bool (*chase_into_ok)(void*,Addr64),
/*IN*/ VexEndness host_endness,
Bool (*resteerOkFn) (void *, Addr64),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
static VexEndness host_endness;
/* Pointer to the guest code area. */
-static UChar *guest_code;
+static const UChar *guest_code;
/* CONST: The guest address for the instruction currently being
translated. */
/* Do a endian load of a 32-bit word, regardless of the endianness of the
underlying host. */
-static inline UInt getUInt(UChar * p)
+static inline UInt getUInt(const UChar * p)
{
UInt w = 0;
#if defined (_MIPSEL)
return (0x03ff0000 & mipsins) >> 16;
}
-static Bool branch_or_jump(UChar * addr)
+static Bool branch_or_jump(const UChar * addr)
{
UInt fmt;
UInt cins = getUInt(addr);
return False;
}
-static Bool is_Branch_or_Jump_and_Link(UChar * addr)
+static Bool is_Branch_or_Jump_and_Link(const UChar * addr)
{
UInt cins = getUInt(addr);
return False;
}
-static Bool branch_or_link_likely(UChar * addr)
+static Bool branch_or_link_likely(const UChar * addr)
{
UInt cins = getUInt(addr);
UInt opcode = get_opcode(cins);
delay_slot_branch = likely_delay_slot = delay_slot_jump = False;
- UChar *code = (UChar *) (guest_code + delta);
+ const UChar *code = guest_code + delta;
cins = getUInt(code);
DIP("\t0x%lx:\t0x%08x\t", (long)guest_PC_curr_instr, cins);
Bool (*resteerOkFn) ( void *, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code_IN,
+ const UChar* guest_code_IN,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
static VexEndness host_endness;
/* Pointer to the guest code area. */
-static UChar* guest_code;
+static const UChar* guest_code;
/* The guest address corresponding to guest_code[0]. */
static Addr64 guest_CIA_bbstart;
/* Do a proper-endian load of a 32-bit word, regardless of the endianness
of the underlying host. */
-static UInt getUIntPPCendianly ( UChar* p )
+static UInt getUIntPPCendianly ( const UChar* p )
{
UInt w = 0;
if (host_endness == VexEndnessBE) {
/* At least this is simple on PPC32: insns are all 4 bytes long, and
4-aligned. So just fish the whole thing out of memory right now
and have done. */
- theInstr = getUIntPPCendianly( (UChar*)(&guest_code[delta]) );
+ theInstr = getUIntPPCendianly( &guest_code[delta] );
if (0) vex_printf("insn: 0x%x\n", theInstr);
/* Spot "Special" instructions (see comment at top of file). */
{
- UChar* code = (UChar*)(guest_code + delta);
+ const UChar* code = guest_code + delta;
/* Spot the 16-byte preamble:
32-bit mode:
5400183E rlwinm 0,0,3,0,31
case 0x32E: case 0x34E: case 0x36E: // tabortdc., tabortwci., tabortdci.
case 0x38E: case 0x3AE: case 0x3EE: // tabort., treclaim., trechkpt.
if (dis_transactional_memory( theInstr,
- getUIntPPCendianly( (UChar*)(&guest_code[delta + 4])),
+ getUIntPPCendianly( &guest_code[delta + 4]),
abiinfo, &dres,
resteerOkFn, callback_opaque))
goto decode_success;
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code_IN,
+ const UChar* guest_code_IN,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
/*------------------------------------------------------------*/
/*--- Forward declarations ---*/
/*------------------------------------------------------------*/
-static UInt s390_decode_and_irgen(UChar *, UInt, DisResult *);
+static UInt s390_decode_and_irgen(const UChar *, UInt, DisResult *);
static void s390_irgen_xonc(IROp, IRTemp, IRTemp, IRTemp);
static void s390_irgen_CLC_EX(IRTemp, IRTemp, IRTemp);
static s390_decode_t
-s390_decode_2byte_and_irgen(UChar *bytes)
+s390_decode_2byte_and_irgen(const UChar *bytes)
{
typedef union {
struct {
}
static s390_decode_t
-s390_decode_4byte_and_irgen(UChar *bytes)
+s390_decode_4byte_and_irgen(const UChar *bytes)
{
typedef union {
struct {
}
static s390_decode_t
-s390_decode_6byte_and_irgen(UChar *bytes)
+s390_decode_6byte_and_irgen(const UChar *bytes)
{
typedef union {
struct {
/* Handle "special" instructions. */
static s390_decode_t
-s390_decode_special_and_irgen(UChar *bytes)
+s390_decode_special_and_irgen(const UChar *bytes)
{
s390_decode_t status = S390_DECODE_OK;
/* Function returns # bytes that were decoded or 0 in case of failure */
static UInt
-s390_decode_and_irgen(UChar *bytes, UInt insn_length, DisResult *dres)
+s390_decode_and_irgen(const UChar *bytes, UInt insn_length, DisResult *dres)
{
s390_decode_t status;
/* Disassemble a single instruction INSN into IR. */
static DisResult
-disInstr_S390_WRK(UChar *insn)
+disInstr_S390_WRK(const UChar *insn)
{
UChar byte;
UInt insn_length;
Bool (*resteerOkFn)(void *, Addr64),
Bool resteerCisOk,
void *callback_opaque,
- UChar *guest_code,
+ const UChar *guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code,
+ const UChar* guest_code,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
/* Pointer to the guest code area (points to start of BB, not to the
insn being processed). */
-static UChar* guest_code;
+static const UChar* guest_code;
/* The guest address corresponding to guest_code[0]. */
static Addr32 guest_EIP_bbstart;
static UInt getSDisp16 ( Int delta0 )
{
- UChar* eip = (UChar*)(&guest_code[delta0]);
+ const UChar* eip = &guest_code[delta0];
UInt d = *eip++;
d |= ((*eip++) << 8);
return extend_s_16to32(d);
Same for BTS, BTR
*/
-static Bool can_be_used_with_LOCK_prefix ( UChar* opc )
+static Bool can_be_used_with_LOCK_prefix ( const UChar* opc )
{
switch (opc[0]) {
case 0x00: case 0x01: case 0x08: case 0x09:
HChar dis_buf[50];
Int am_sz, d_sz, n_prefixes;
DisResult dres;
- UChar* insn; /* used in SSE decoders */
+ const UChar* insn; /* used in SSE decoders */
/* The running delta */
Int delta = (Int)delta64;
/* Spot "Special" instructions (see comment at top of file). */
{
- UChar* code = (UChar*)(guest_code + delta);
+ const UChar* code = guest_code + delta;
/* Spot the 12-byte preamble:
C1C703 roll $3, %edi
C1C70D roll $13, %edi
/* Handle a couple of weird-ass NOPs that have been observed in the
wild. */
{
- UChar* code = (UChar*)(guest_code + delta);
+ const UChar* code = guest_code + delta;
/* Sun's JVM 1.5.0 uses the following as a NOP:
26 2E 64 65 90 %es:%cs:%fs:%gs:nop */
if (code[0] == 0x26 && code[1] == 0x2E && code[2] == 0x64
allowed. */
if (pfx_lock) {
- if (can_be_used_with_LOCK_prefix( (UChar*)&guest_code[delta] )) {
+ if (can_be_used_with_LOCK_prefix( &guest_code[delta] )) {
DIP("lock ");
} else {
*expect_CAS = False;
/* Note, this doesn't handle SSE2 or SSE3. That is handled in a
later section, further on. */
- insn = (UChar*)&guest_code[delta];
+ insn = &guest_code[delta];
/* Treat fxsave specially. It should be doable even on an SSE0
(Pentium-II class) CPU. Hence be prepared to handle it on
if (0 == (archinfo->hwcaps & VEX_HWCAPS_X86_SSE2))
goto after_sse_decoders; /* no SSE2 capabilities */
- insn = (UChar*)&guest_code[delta];
+ insn = &guest_code[delta];
/* 66 0F 58 = ADDPD -- add 32Fx4 from R/M to R */
if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x58) {
if (0 == (archinfo->hwcaps & VEX_HWCAPS_X86_SSE2))
goto after_sse_decoders; /* no SSE3 capabilities */
- insn = (UChar*)&guest_code[delta];
+ insn = &guest_code[delta];
/* F3 0F 12 = MOVSLDUP -- move from E (mem or xmm) to G (xmm),
duplicating some lanes (2:2:0:0). */
Bool (*resteerOkFn) ( void*, Addr64 ),
Bool resteerCisOk,
void* callback_opaque,
- UChar* guest_code_IN,
+ const UChar* guest_code_IN,
Long delta,
Addr64 guest_IP,
VexArch guest_arch,
Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, AMD64Instr* i,
Bool mode64, VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted )
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted )
{
UInt /*irno,*/ opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
UInt xtra;
/* movabsq $disp_cp_chain_me_to_{slow,fast}EP,%r11; */
*p++ = 0x49;
*p++ = 0xBB;
- void* disp_cp_chain_me
+ const void* disp_cp_chain_me
= i->Ain.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = emit64(p, Ptr_to_ULong(disp_cp_chain_me));
extern void getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
extern Bool isMove_AMD64Instr ( AMD64Instr*, HReg*, HReg* );
-extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
- UChar* buf, Int nbuf,
- AMD64Instr* i,
- Bool mode64,
- VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted );
+extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
+ UChar* buf, Int nbuf,
+ AMD64Instr* i,
+ Bool mode64,
+ VexEndness endness_host,
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted );
extern void genSpill_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
HReg rreg, Int offset, Bool );
Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARM64Instr* i,
Bool mode64, VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted )
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted )
{
UInt* p = (UInt*)buf;
vassert(nbuf >= 32);
/* movk x9, VG_(disp_cp_chain_me_to_{slowEP,fastEP})[47:32], lsl 32 */
/* movk x9, VG_(disp_cp_chain_me_to_{slowEP,fastEP})[63:48], lsl 48 */
/* blr x9 */
- void* disp_cp_chain_me
+ const void* disp_cp_chain_me
= i->ARM64in.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = imm64_to_iregNo_EXACTLY4(p, /*x*/9,
UChar* buf, Int nbuf, ARM64Instr* i,
Bool mode64,
VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted );
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted );
extern void genSpill_ARM64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
HReg rreg, Int offset, Bool );
Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, ARMInstr* i,
Bool mode64, VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted )
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted )
{
UInt* p = (UInt*)buf;
vassert(nbuf >= 32);
/* movw r12, lo16(VG_(disp_cp_chain_me_to_{slowEP,fastEP})) */
/* movt r12, hi16(VG_(disp_cp_chain_me_to_{slowEP,fastEP})) */
/* blx r12 (A1) */
- void* disp_cp_chain_me
+ const void* disp_cp_chain_me
= i->ARMin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = imm32_to_iregNo_EXACTLY2(p, /*r*/12,
UChar* buf, Int nbuf, ARMInstr* i,
Bool mode64,
VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted );
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted );
extern void genSpill_ARM ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
HReg rreg, Int offset, Bool );
UChar* buf, Int nbuf, MIPSInstr* i,
Bool mode64,
VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted )
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted )
{
UChar *p = &buf[0];
UChar *ptmp = p;
number of instructions (3) below. */
/* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */
/* jr r9 */
- void* disp_cp_chain_me
+ const void* disp_cp_chain_me
= i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
extern void getRegUsage_MIPSInstr (HRegUsage *, MIPSInstr *, Bool);
extern void mapRegs_MIPSInstr (HRegRemap *, MIPSInstr *, Bool mode64);
extern Bool isMove_MIPSInstr (MIPSInstr *, HReg *, HReg *);
-extern Int emit_MIPSInstr (/*MB_MOD*/Bool* is_profInc,
- UChar* buf, Int nbuf, MIPSInstr* i,
- Bool mode64,
- VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted );
+extern Int emit_MIPSInstr (/*MB_MOD*/Bool* is_profInc,
+ UChar* buf, Int nbuf, MIPSInstr* i,
+ Bool mode64,
+ VexEndness endness_host,
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted );
extern void genSpill_MIPS ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
HReg rreg, Int offset, Bool);
Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, PPCInstr* i,
Bool mode64, VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted)
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted)
{
UChar* p = &buf[0];
vassert(nbuf >= 32);
the first patchable byte. So: don't change the number of
instructions (32-bit: 4, 64-bit: 7) below. */
/* imm32/64-fixed r30, VG_(disp_cp_chain_me_to_{slowEP,fastEP} */
- void* disp_cp_chain_me
+ const void* disp_cp_chain_me
= i->Pin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = mkLoadImm_EXACTLY2or5(
extern void getRegUsage_PPCInstr ( HRegUsage*, PPCInstr*, Bool mode64 );
extern void mapRegs_PPCInstr ( HRegRemap*, PPCInstr* , Bool mode64);
extern Bool isMove_PPCInstr ( PPCInstr*, HReg*, HReg* );
-extern Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
- UChar* buf, Int nbuf, PPCInstr* i,
- Bool mode64,
- VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted );
+extern Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
+ UChar* buf, Int nbuf, PPCInstr* i,
+ Bool mode64,
+ VexEndness endness_host,
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted );
extern void genSpill_PPC ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
HReg rreg, Int offsetB, Bool mode64 );
chainXDirect_S390 and unchainXDirect_S390 below. */
static UChar *
s390_insn_xdirect_emit(UChar *buf, const s390_insn *insn,
- void *disp_cp_chain_me_to_slowEP,
- void *disp_cp_chain_me_to_fastEP)
+ const void *disp_cp_chain_me_to_slowEP,
+ const void *disp_cp_chain_me_to_fastEP)
{
/* We're generating chain-me requests here, so we need to be
sure this is actually allowed -- no-redir translations can't
buf = s390_emit_STG(buf, R0, 0, b, DISP20(d));
/* Load the chosen entry point into the scratch reg */
- void *disp_cp_chain_me;
+ const void *disp_cp_chain_me;
disp_cp_chain_me =
insn->variant.xdirect.to_fast_entry ? disp_cp_chain_me_to_fastEP
static UChar *
-s390_insn_xindir_emit(UChar *buf, const s390_insn *insn, void *disp_cp_xindir)
+s390_insn_xindir_emit(UChar *buf, const s390_insn *insn,
+ const void *disp_cp_xindir)
{
/* We're generating transfers that could lead indirectly to a
chain-me, so we need to be sure this is actually allowed --
static UChar *
s390_insn_xassisted_emit(UChar *buf, const s390_insn *insn,
- void *disp_cp_xassisted)
+ const void *disp_cp_xassisted)
{
/* Use ptmp for backpatching conditional jumps. */
UChar *ptmp = buf;
Int
emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
Bool mode64, VexEndness endness_host,
- void *disp_cp_chain_me_to_slowEP,
- void *disp_cp_chain_me_to_fastEP, void *disp_cp_xindir,
- void *disp_cp_xassisted)
+ const void *disp_cp_chain_me_to_slowEP,
+ const void *disp_cp_chain_me_to_fastEP,
+ const void *disp_cp_xindir,
+ const void *disp_cp_xassisted)
{
UChar *end;
void mapRegs_S390Instr ( HRegRemap *, s390_insn *, Bool );
Bool isMove_S390Instr ( s390_insn *, HReg *, HReg * );
Int emit_S390Instr ( Bool *, UChar *, Int, s390_insn *, Bool,
- VexEndness, void *, void *, void *, void *);
+ VexEndness, const void *, const void *,
+ const void *, const void *);
void getAllocableRegs_S390( Int *, HReg **, Bool );
void genSpill_S390 ( HInstr **, HInstr **, HReg , Int , Bool );
void genReload_S390 ( HInstr **, HInstr **, HReg , Int , Bool );
Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
UChar* buf, Int nbuf, X86Instr* i,
Bool mode64, VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted )
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted )
{
UInt irno, opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
two instructions below. */
/* movl $disp_cp_chain_me_to_{slow,fast}EP,%edx; */
*p++ = 0xBA;
- void* disp_cp_chain_me
+ const void* disp_cp_chain_me
= i->Xin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
: disp_cp_chain_me_to_slowEP;
p = emit32(p, (UInt)Ptr_to_ULong(disp_cp_chain_me));
extern void getRegUsage_X86Instr ( HRegUsage*, X86Instr*, Bool );
extern void mapRegs_X86Instr ( HRegRemap*, X86Instr*, Bool );
extern Bool isMove_X86Instr ( X86Instr*, HReg*, HReg* );
-extern Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
- UChar* buf, Int nbuf, X86Instr* i,
- Bool mode64,
- VexEndness endness_host,
- void* disp_cp_chain_me_to_slowEP,
- void* disp_cp_chain_me_to_fastEP,
- void* disp_cp_xindir,
- void* disp_cp_xassisted );
+extern Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
+ UChar* buf, Int nbuf, X86Instr* i,
+ Bool mode64,
+ VexEndness endness_host,
+ const void* disp_cp_chain_me_to_slowEP,
+ const void* disp_cp_chain_me_to_fastEP,
+ const void* disp_cp_xindir,
+ const void* disp_cp_xassisted );
extern void genSpill_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
HReg rreg, Int offset, Bool );
Int, Int, Bool, Bool, Addr64 );
Int (*emit) ( /*MB_MOD*/Bool*,
UChar*, Int, HInstr*, Bool, VexEndness,
- void*, void*, void*, void* );
+ const void*, const void*, const void*,
+ const void* );
IRExpr* (*specHelper) ( const HChar*, IRExpr**, IRStmt**, Int );
Bool (*preciseMemExnsFn) ( Int, Int );
ppReg = (void(*)(HReg)) ppHRegX86;
iselSB = iselSB_X86;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_X86Instr;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegAMD64;
iselSB = iselSB_AMD64;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_AMD64Instr;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegPPC;
iselSB = iselSB_PPC;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_PPCInstr;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegPPC;
iselSB = iselSB_PPC;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_PPCInstr;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegS390;
iselSB = iselSB_S390;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*)) emit_S390Instr;
+ const void*,const void*,const void*,
+ const void*))
+ emit_S390Instr;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
vassert(vta->archinfo_host.endness == VexEndnessBE);
ppReg = (void(*)(HReg)) ppHRegARM;
iselSB = iselSB_ARM;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_ARMInstr;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegARM64;
iselSB = iselSB_ARM64;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_ARM64Instr;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegMIPS;
iselSB = iselSB_MIPS;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_MIPSInstr;
host_word_type = Ity_I32;
vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps));
ppReg = (void(*)(HReg)) ppHRegMIPS;
iselSB = iselSB_MIPS;
emit = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
- void*,void*,void*,void*))
+ const void*,const void*,const void*,
+ const void*))
emit_MIPSInstr;
host_word_type = Ity_I64;
vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_host.hwcaps));
vex_printf("can't show code due to extents > 1\n");
} else {
/* HACK */
- UChar* p = (UChar*)vta->guest_bytes;
+ const UChar* p = vta->guest_bytes;
UInt sum = 0;
UInt guest_bytes_read = (UInt)vta->guest_extents->len[0];
vex_printf("GuestBytes %llx %u ", vta->guest_bytes_addr,
/* IN: the block to translate, and its guest address. */
/* where are the actual bytes in the host's address space? */
- UChar* guest_bytes;
+ const UChar* guest_bytes;
/* where do the bytes really come from in the guest's aspace?
This is the post-redirection guest address. Not that Vex
understands anything about redirection; that is all done on
FIXME: update this comment
*/
- void* disp_cp_chain_me_to_slowEP;
- void* disp_cp_chain_me_to_fastEP;
- void* disp_cp_xindir;
- void* disp_cp_xassisted;
+ const void* disp_cp_chain_me_to_slowEP;
+ const void* disp_cp_chain_me_to_fastEP;
+ const void* disp_cp_xindir;
+ const void* disp_cp_xassisted;
}
VexTranslateArgs;