Solaris port: VEX aspects -- pretty minimal.
Authors of this port:
Petr Pavlu <setup@dagobah.cz>
Ivo Raisr <ivosh@ivosh.net>
Theo Schlossnagle <theo@omniti.com>
git-svn-id: svn://svn.valgrind.org/vex/trunk@3165
#define AMD64G_CC_MASK_C (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P (1ULL << AMD64G_CC_SHIFT_P)
+/* additional rflags masks */
+#define AMD64G_CC_SHIFT_ID 21
+#define AMD64G_CC_SHIFT_AC 18
+#define AMD64G_CC_SHIFT_D 10
+
+#define AMD64G_CC_MASK_ID (1ULL << AMD64G_CC_SHIFT_ID)
+#define AMD64G_CC_MASK_AC (1ULL << AMD64G_CC_SHIFT_AC)
+#define AMD64G_CC_MASK_D (1ULL << AMD64G_CC_SHIFT_D)
+
/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3 14
#define AMD64G_FC_SHIFT_C2 10
return rflags;
}
+/* VISIBLE TO LIBVEX CLIENT */
+void
+LibVEX_GuestAMD64_put_rflags ( ULong rflags,
+ /*MOD*/VexGuestAMD64State* vex_state )
+{
+ /* D flag */
+ if (rflags & AMD64G_CC_MASK_D) {
+ vex_state->guest_DFLAG = -1;
+ rflags &= ~AMD64G_CC_MASK_D;
+ }
+ else
+ vex_state->guest_DFLAG = 1;
+
+ /* ID flag */
+ if (rflags & AMD64G_CC_MASK_ID) {
+ vex_state->guest_IDFLAG = 1;
+ rflags &= ~AMD64G_CC_MASK_ID;
+ }
+ else
+ vex_state->guest_IDFLAG = 0;
+
+ /* AC flag */
+ if (rflags & AMD64G_CC_MASK_AC) {
+ vex_state->guest_ACFLAG = 1;
+ rflags &= ~AMD64G_CC_MASK_AC;
+ }
+ else
+ vex_state->guest_ACFLAG = 0;
+
+   ULong cc_mask = AMD64G_CC_MASK_O | AMD64G_CC_MASK_S | AMD64G_CC_MASK_Z |
+                   AMD64G_CC_MASK_A | AMD64G_CC_MASK_C | AMD64G_CC_MASK_P;
+ vex_state->guest_CC_OP = AMD64G_CC_OP_COPY;
+ vex_state->guest_CC_DEP1 = rflags & cc_mask;
+ vex_state->guest_CC_DEP2 = 0;
+ vex_state->guest_CC_NDEP = 0;
+}
+
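As a usage illustration only (not part of the patch): a minimal client-side sketch pairing the new put_rflags with the existing LibVEX_GuestAMD64_get_rflags. The function name and the assumption that 'state' is an initialised guest state are hypothetical; bit 10 is written as a literal because the private AMD64G_CC_MASK_D name is not visible to clients.

   #include "libvex_guest_amd64.h"

   /* Sketch: force the guest D flag on, leaving the other flags as-is. */
   static void demo_set_dflag ( VexGuestAMD64State* state )
   {
      ULong rflags = LibVEX_GuestAMD64_get_rflags(state);
      LibVEX_GuestAMD64_put_rflags(rflags | (1ULL << 10), state);
      /* Now state->guest_DFLAG == -1, and the O/S/Z/A/C/P bits sit in
         the CC_OP_COPY thunk (guest_CC_DEP1). */
   }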
/* VISIBLE TO LIBVEX CLIENT */
void
LibVEX_GuestAMD64_put_rflag_c ( ULong new_carry_flag,
}
-/* CALLED FROM GENERATED CODE */
-/* DIRTY HELPER (reads guest state, writes guest mem) */
-/* NOTE: only handles 32-bit format (no REX.W on the insn) */
-void amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM ( VexGuestAMD64State* gst,
- HWord addr )
+static
+void do_fxsave ( VexGuestAMD64State* gst, HWord addr, Bool save_xmm_regs )
{
/* Derived from values obtained from
vendor_id : AuthenticAMD
dstS[7] = 0;
}
- /* That's the first 160 bytes of the image done. Now only %xmm0
- .. %xmm15 remain to be copied, and we let the generated IR do
- that, so as to make Memcheck's definedness flow for the non-XMM
- parts independant from that of the all the other control and
- status words in the structure. This avoids the false positives
- shown in #291310. */
+ /* That's the first 160 bytes of the image done. */
+   if (save_xmm_regs) {
+ /* Now only %xmm0 .. %xmm15 remain to be copied. If the host is
+ big-endian, these need to be byte-swapped. */
+ U128 *xmm = (U128 *)(addr + 160);
+
+ vassert(host_is_little_endian());
+
+# define COPY_U128(_dst,_src) \
+ do { _dst[0] = _src[0]; _dst[1] = _src[1]; \
+ _dst[2] = _src[2]; _dst[3] = _src[3]; } \
+ while (0)
+
+ COPY_U128( xmm[0], gst->guest_YMM0 );
+ COPY_U128( xmm[1], gst->guest_YMM1 );
+ COPY_U128( xmm[2], gst->guest_YMM2 );
+ COPY_U128( xmm[3], gst->guest_YMM3 );
+ COPY_U128( xmm[4], gst->guest_YMM4 );
+ COPY_U128( xmm[5], gst->guest_YMM5 );
+ COPY_U128( xmm[6], gst->guest_YMM6 );
+ COPY_U128( xmm[7], gst->guest_YMM7 );
+ COPY_U128( xmm[8], gst->guest_YMM8 );
+ COPY_U128( xmm[9], gst->guest_YMM9 );
+ COPY_U128( xmm[10], gst->guest_YMM10 );
+ COPY_U128( xmm[11], gst->guest_YMM11 );
+ COPY_U128( xmm[12], gst->guest_YMM12 );
+ COPY_U128( xmm[13], gst->guest_YMM13 );
+ COPY_U128( xmm[14], gst->guest_YMM14 );
+ COPY_U128( xmm[15], gst->guest_YMM15 );
+# undef COPY_U128
+ } else {
+      /* We let the generated IR copy the remaining %xmm0 .. %xmm15, so as
+         to make Memcheck's definedness flow for the non-XMM parts
+         independent from that of all the other control and status words in
+         the structure.  This avoids the false positives shown in #291310. */
+ }
}
-/* CALLED FROM GENERATED CODE */
-/* DIRTY HELPER (writes guest state, reads guest mem) */
-VexEmNote amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM ( VexGuestAMD64State* gst,
- HWord addr )
+static
+VexEmNote do_fxrstor ( VexGuestAMD64State* gst, HWord addr,
+ Bool rstor_xmm_regs )
{
Fpu_State tmp;
VexEmNote warnX87 = EmNote_NONE;
UShort fp_tags;
Int r, stno, i;
- /* Don't restore %xmm0 .. %xmm15, for the same reasons that
- amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM doesn't save them. See
- comment in that function for details. */
+   if (rstor_xmm_regs) {
+ /* Restore %xmm0 .. %xmm15. If the host is big-endian, these need
+ to be byte-swapped. */
+ U128 *xmm = (U128 *)(addr + 160);
+
+ vassert(host_is_little_endian());
+
+# define COPY_U128(_dst,_src) \
+ do { _dst[0] = _src[0]; _dst[1] = _src[1]; \
+ _dst[2] = _src[2]; _dst[3] = _src[3]; } \
+ while (0)
+
+ COPY_U128( gst->guest_YMM0, xmm[0] );
+ COPY_U128( gst->guest_YMM1, xmm[1] );
+ COPY_U128( gst->guest_YMM2, xmm[2] );
+ COPY_U128( gst->guest_YMM3, xmm[3] );
+ COPY_U128( gst->guest_YMM4, xmm[4] );
+ COPY_U128( gst->guest_YMM5, xmm[5] );
+ COPY_U128( gst->guest_YMM6, xmm[6] );
+ COPY_U128( gst->guest_YMM7, xmm[7] );
+ COPY_U128( gst->guest_YMM8, xmm[8] );
+ COPY_U128( gst->guest_YMM9, xmm[9] );
+ COPY_U128( gst->guest_YMM10, xmm[10] );
+ COPY_U128( gst->guest_YMM11, xmm[11] );
+ COPY_U128( gst->guest_YMM12, xmm[12] );
+ COPY_U128( gst->guest_YMM13, xmm[13] );
+ COPY_U128( gst->guest_YMM14, xmm[14] );
+ COPY_U128( gst->guest_YMM15, xmm[15] );
+
+# undef COPY_U128
+ } else {
+      /* Don't restore %xmm0 .. %xmm15, for the same reasons that
+         do_fxsave() with save_xmm_regs == False doesn't save them.
+         See the comment in that function for details. */
+ }
/* Copy the x87 registers out of the image, into a temporary
Fpu_State struct. */
}
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+/* NOTE: only handles 32-bit format (no REX.W on the insn) */
+/* NOTE: does not save XMM registers - see do_fxsave() for details */
+void amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM ( VexGuestAMD64State* gst,
+ HWord addr )
+{
+ do_fxsave( gst, addr, False );
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest state, reads guest mem) */
+VexEmNote amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM ( VexGuestAMD64State* gst,
+ HWord addr )
+{
+ return do_fxrstor( gst, addr, False );
+}
+
+
/* DIRTY HELPER (writes guest state) */
/* Initialise the x87 FPU state as per 'finit'. */
void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* gst )
return ew;
}
+/* VISIBLE TO LIBVEX CLIENT */
+/* Do FXSAVE from the supplied VexGuestAMD64State structure and store the
+   result at the given address, which must point to a buffer of at least
+   416 bytes.  Also saves the XMM registers. */
+void LibVEX_GuestAMD64_fxsave ( /*IN*/VexGuestAMD64State* gst,
+ /*OUT*/HWord fp_state )
+{
+ do_fxsave( gst, fp_state, True );
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Do FXRSTOR from the supplied address and write the values read into the
+   given VexGuestAMD64State structure.  Also restores the XMM registers. */
+VexEmNote LibVEX_GuestAMD64_fxrstor ( /*IN*/HWord fp_state,
+ /*MOD*/VexGuestAMD64State* gst )
+{
+ return do_fxrstor( gst, fp_state, True );
+}
+
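A hedged usage sketch for the two new client calls; the buffer size follows the 416-byte requirement stated above, and the function and buffer names are hypothetical.

   #include "libvex_guest_amd64.h"

   /* Sketch: checkpoint and restore the guest's FP/SSE state. */
   static VexEmNote demo_fp_roundtrip ( VexGuestAMD64State* state )
   {
      UChar buf[416];                               /* >= 416 bytes */
      LibVEX_GuestAMD64_fxsave(state, (HWord)buf);  /* x87 + XMM out */
      /* ... the guest state may be scribbled on here ... */
      return LibVEX_GuestAMD64_fxrstor((HWord)buf, state);
   }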
/*---------------------------------------------------------------*/
/*--- Misc integer helpers, including rotates and CPUID. ---*/
return grp8_names[opc_aux];
}
-//.. static const HChar* nameSReg ( UInt sreg )
-//.. {
-//.. switch (sreg) {
-//.. case R_ES: return "%es";
-//.. case R_CS: return "%cs";
-//.. case R_SS: return "%ss";
-//.. case R_DS: return "%ds";
-//.. case R_FS: return "%fs";
-//.. case R_GS: return "%gs";
-//.. default: vpanic("nameSReg(x86)");
-//.. }
-//.. }
+static const HChar* nameSReg ( UInt sreg )
+{
+ switch (sreg) {
+ case R_ES: return "%es";
+ case R_CS: return "%cs";
+ case R_SS: return "%ss";
+ case R_DS: return "%ds";
+ case R_FS: return "%fs";
+ case R_GS: return "%gs";
+ default: vpanic("nameSReg(amd64)");
+ }
+}
static const HChar* nameMMXReg ( Int mmxreg )
{
//.. return len+delta0;
//.. }
//.. }
-//..
-//..
+
+/* Handle move instructions of the form
+     mov S, E  meaning
+     mov sreg, reg-or-mem
+   Is passed a ptr to the modRM byte, and the data size.  Returns the
+   address advanced completely over this instruction.
+
+   VEX does not currently simulate segment registers on AMD64, which means
+   that instead of the segment register's value, zero is moved to the
+   destination.  The zero value represents a null (unused) selector.  This
+   is not correct (especially for the %cs, %fs and %gs registers) but it
+   seems to be a sufficient simulation for the programs seen so far that
+   use this instruction.  If some program actually decides to use the
+   obtained segment selector for something meaningful, the zero value
+   should be a clear indicator that there is a problem.
+
+   S(src) is sreg.
+   E(dst) is reg-or-mem
+
+   If E is reg, -->    PUT $0, %E
+
+   If E is mem, -->    (getAddr E) -> tmpa
+                       ST $0, (tmpa)
+*/
+static
+ULong dis_mov_S_E ( const VexAbiInfo* vbi,
+ Prefix pfx,
+ Int size,
+ Long delta0 )
+{
+ Int len;
+ UChar rm = getUChar(delta0);
+ HChar dis_buf[50];
+
+ if (epartIsReg(rm)) {
+ putIRegE(size, pfx, rm, mkU(szToITy(size), 0));
+ DIP("mov %s,%s\n", nameSReg(gregOfRexRM(pfx, rm)),
+ nameIRegE(size, pfx, rm));
+ return 1+delta0;
+ }
+
+ /* E refers to memory */
+ {
+ IRTemp addr = disAMode(&len, vbi, pfx, delta0, dis_buf, 0);
+ storeLE(mkexpr(addr), mkU16(0));
+ DIP("mov %s,%s\n", nameSReg(gregOfRexRM(pfx, rm)),
+ dis_buf);
+ return len+delta0;
+ }
+}
+
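To make the zero-selector simulation above concrete, an illustrative (hypothetical) decode:

   /* Guest insn:   8C D8        mov %ds, %eax
      epartIsReg(0xD8) holds, so dis_mov_S_E emits the equivalent of
         PUT %rax = 0x0:I32   (zero-extended to 64 bits)
      and the guest reads back a null selector from %eax. */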
//.. static
//.. void dis_push_segreg ( UInt sreg, Int sz )
//.. {
delta = dis_mov_E_G(vbi, pfx, sz, delta);
return delta;
+ case 0x8C: /* MOV S,E -- MOV from a SEGMENT REGISTER */
+ if (haveF2orF3(pfx)) goto decode_failure;
+ delta = dis_mov_S_E(vbi, pfx, sz, delta);
+ return delta;
+
case 0x8D: /* LEA M,Gv */
if (haveF2orF3(pfx)) goto decode_failure;
if (sz != 4 && sz != 8)
DIP("int $0x3\n");
return delta;
+ case 0xCD: /* INT imm8 */
+ d64 = getUChar(delta); delta++;
+
+ /* Handle int $0xD2 (Solaris fasttrap syscalls). */
+ if (d64 == 0xD2) {
+ jmp_lit(dres, Ijk_Sys_int210, guest_RIP_bbstart + delta);
+ vassert(dres->whatNext == Dis_StopHere);
+ DIP("int $0xD2\n");
+ return delta;
+ }
+ goto decode_failure;
+
case 0xD0: { /* Grp2 1,Eb */
Bool decode_OK = True;
if (haveF2orF3(pfx)) goto decode_failure;
#define X86G_CC_MASK_C (1 << X86G_CC_SHIFT_C)
#define X86G_CC_MASK_P (1 << X86G_CC_SHIFT_P)
+/* additional eflags masks */
+#define X86G_CC_SHIFT_ID 21
+#define X86G_CC_SHIFT_AC 18
+#define X86G_CC_SHIFT_D 10
+
+#define X86G_CC_MASK_ID (1 << X86G_CC_SHIFT_ID)
+#define X86G_CC_MASK_AC (1 << X86G_CC_SHIFT_AC)
+#define X86G_CC_MASK_D (1 << X86G_CC_SHIFT_D)
+
/* FPU flag masks */
#define X86G_FC_SHIFT_C3 14
#define X86G_FC_SHIFT_C2 10
UInt dflag = vex_state->guest_DFLAG;
vassert(dflag == 1 || dflag == 0xFFFFFFFF);
if (dflag == 0xFFFFFFFF)
- eflags |= (1<<10);
+ eflags |= X86G_CC_MASK_D;
if (vex_state->guest_IDFLAG == 1)
- eflags |= (1<<21);
+ eflags |= X86G_CC_MASK_ID;
if (vex_state->guest_ACFLAG == 1)
- eflags |= (1<<18);
+ eflags |= X86G_CC_MASK_AC;
return eflags;
}
+/* VISIBLE TO LIBVEX CLIENT */
+void
+LibVEX_GuestX86_put_eflags ( UInt eflags,
+ /*MOD*/VexGuestX86State* vex_state )
+{
+ /* D flag */
+ if (eflags & X86G_CC_MASK_D) {
+ vex_state->guest_DFLAG = 0xFFFFFFFF;
+ eflags &= ~X86G_CC_MASK_D;
+ }
+ else
+ vex_state->guest_DFLAG = 1;
+
+ /* ID flag */
+ if (eflags & X86G_CC_MASK_ID) {
+ vex_state->guest_IDFLAG = 1;
+ eflags &= ~X86G_CC_MASK_ID;
+ }
+ else
+ vex_state->guest_IDFLAG = 0;
+
+ /* AC flag */
+ if (eflags & X86G_CC_MASK_AC) {
+ vex_state->guest_ACFLAG = 1;
+ eflags &= ~X86G_CC_MASK_AC;
+ }
+ else
+ vex_state->guest_ACFLAG = 0;
+
+ UInt cc_mask = X86G_CC_MASK_O | X86G_CC_MASK_S | X86G_CC_MASK_Z |
+ X86G_CC_MASK_A | X86G_CC_MASK_C | X86G_CC_MASK_P;
+ vex_state->guest_CC_OP = X86G_CC_OP_COPY;
+ vex_state->guest_CC_DEP1 = eflags & cc_mask;
+ vex_state->guest_CC_DEP2 = 0;
+ vex_state->guest_CC_NDEP = 0;
+}
+
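Again purely illustrative (names hypothetical): how the same decomposition plays out for the x86 variant, using literals for the AC (bit 18) and C (bit 0) flags since the private X86G_CC_MASK_* names are not visible to clients.

   #include "libvex_guest_x86.h"

   /* Sketch: hand put_eflags AC plus C. */
   static void demo_put_eflags ( VexGuestX86State* state )
   {
      LibVEX_GuestX86_put_eflags((1u << 18) | 1u, state);
      /* Now state->guest_ACFLAG == 1, state->guest_DFLAG == 1 (D clear),
         and guest_CC_DEP1 == 1: only the C bit remains in the
         X86G_CC_OP_COPY thunk. */
   }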
/* VISIBLE TO LIBVEX CLIENT */
void
LibVEX_GuestX86_put_eflag_c ( UInt new_carry_flag,
warnXMM = (VexEmNote)(w64 >> 32);
- gst->guest_SSEROUND = (UInt)w64;
+ gst->guest_SSEROUND = w64 & 0xFFFFFFFF;
}
/* Prefer an X87 emwarn over an XMM one, if both exist. */
return do_put_x87( False/*don't move regs*/, (UChar*)addr, gst);
}
+/* VISIBLE TO LIBVEX CLIENT */
+/* Do x87 save from the supplied VexGuestX86State structure and store the
+   result at the given address, which must point to a buffer of at least
+   108 bytes. */
+void LibVEX_GuestX86_get_x87 ( /*IN*/VexGuestX86State* vex_state,
+ /*OUT*/UChar* x87_state )
+{
+ do_get_x87 ( vex_state, x87_state );
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Do x87 restore from the supplied address and write the values read into
+   the given VexGuestX86State structure. */
+VexEmNote LibVEX_GuestX86_put_x87 ( /*IN*/UChar* x87_state,
+ /*MOD*/VexGuestX86State* vex_state )
+{
+ return do_put_x87 ( True/*moveRegs*/, x87_state, vex_state );
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Return mxcsr from the supplied VexGuestX86State structure. */
+UInt LibVEX_GuestX86_get_mxcsr ( /*IN*/VexGuestX86State* vex_state )
+{
+ return x86g_create_mxcsr ( vex_state->guest_SSEROUND );
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Modify the given VexGuestX86State structure according to the passed mxcsr
+ value. */
+VexEmNote LibVEX_GuestX86_put_mxcsr ( /*IN*/UInt mxcsr,
+ /*MOD*/VexGuestX86State* vex_state)
+{
+ ULong w64 = x86g_check_ldmxcsr( mxcsr );
+ vex_state->guest_SSEROUND = w64 & 0xFFFFFFFF;
+ return (VexEmNote)(w64 >> 32);
+}
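A hedged sketch tying the four new x86 client calls together; the 108-byte figure is from the comment above, and the function and variable names are hypothetical.

   #include "libvex_guest_x86.h"

   /* Sketch: checkpoint and restore x87 and MXCSR state. */
   static VexEmNote demo_x87_mxcsr_roundtrip ( VexGuestX86State* state )
   {
      UChar x87_img[108];                    /* >= 108 bytes */
      UInt  mxcsr;

      LibVEX_GuestX86_get_x87(state, x87_img);
      mxcsr = LibVEX_GuestX86_get_mxcsr(state);
      /* ... the guest state may be scribbled on here ... */
      (void) LibVEX_GuestX86_put_x87(x87_img, state);
      return LibVEX_GuestX86_put_mxcsr(mxcsr, state);
   }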
/*---------------------------------------------------------------*/
/*--- Misc integer helpers, including rotates and CPUID. ---*/
}
/* Handle int $0x80 (linux syscalls), int $0x81 and $0x82
- (darwin syscalls). As part of this, note where we are, so we
+ (darwin syscalls), int $0x91 (Solaris syscalls) and int $0xD2
+ (Solaris fasttrap syscalls). As part of this, note where we are, so we
can back up the guest to this point if the syscall needs to
be restarted. */
- if (d32 == 0x80) {
- stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
- mkU32(guest_EIP_curr_instr) ) );
- jmp_lit(&dres, Ijk_Sys_int128, ((Addr32)guest_EIP_bbstart)+delta);
- vassert(dres.whatNext == Dis_StopHere);
- DIP("int $0x80\n");
+ IRJumpKind jump_kind;
+ switch (d32) {
+ case 0x80:
+ jump_kind = Ijk_Sys_int128;
break;
- }
- if (d32 == 0x81) {
- stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
- mkU32(guest_EIP_curr_instr) ) );
- jmp_lit(&dres, Ijk_Sys_int129, ((Addr32)guest_EIP_bbstart)+delta);
- vassert(dres.whatNext == Dis_StopHere);
- DIP("int $0x81\n");
+ case 0x81:
+ jump_kind = Ijk_Sys_int129;
break;
- }
- if (d32 == 0x82) {
- stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
- mkU32(guest_EIP_curr_instr) ) );
- jmp_lit(&dres, Ijk_Sys_int130, ((Addr32)guest_EIP_bbstart)+delta);
- vassert(dres.whatNext == Dis_StopHere);
- DIP("int $0x82\n");
+ case 0x82:
+ jump_kind = Ijk_Sys_int130;
+ break;
+ case 0x91:
+ jump_kind = Ijk_Sys_int145;
+ break;
+ case 0xD2:
+ jump_kind = Ijk_Sys_int210;
break;
+ default:
+ /* none of the above */
+ goto decode_failure;
}
- /* none of the above */
- goto decode_failure;
+ stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
+ mkU32(guest_EIP_curr_instr) ) );
+ jmp_lit(&dres, jump_kind, ((Addr32)guest_EIP_bbstart)+delta);
+ vassert(dres.whatNext == Dis_StopHere);
+ DIP("int $0x%x\n", (Int)d32);
+ break;
/* ------------------------ Jcond, byte offset --------- */
case 0x05: /* AMD's syscall */
stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
- mkU32(guest_EIP_curr_instr) ) );
+ mkU32(guest_EIP_curr_instr) ) );
jmp_lit(&dres, Ijk_Sys_syscall, ((Addr32)guest_EIP_bbstart)+delta);
vassert(dres.whatNext == Dis_StopHere);
DIP("syscall\n");
case Ijk_ClientReq: trcval = VEX_TRC_JMP_CLIENTREQ; break;
case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
case Ijk_Sys_int32: trcval = VEX_TRC_JMP_SYS_INT32; break;
+ case Ijk_Sys_int210: trcval = VEX_TRC_JMP_SYS_INT210; break;
case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break;
case Ijk_EmWarn: trcval = VEX_TRC_JMP_EMWARN; break;
case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break;
case Ijk_SigSEGV:
case Ijk_SigTRAP:
case Ijk_Sys_syscall:
+ case Ijk_Sys_int210:
case Ijk_InvalICache:
case Ijk_Yield:
{
case Ijk_SigSEGV:
case Ijk_SigTRAP:
case Ijk_Sys_syscall:
+ case Ijk_Sys_int210:
case Ijk_InvalICache:
case Ijk_Yield: {
HReg r = iselIntExpr_R(env, next);
case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break;
case Ijk_Sys_int129: trcval = VEX_TRC_JMP_SYS_INT129; break;
case Ijk_Sys_int130: trcval = VEX_TRC_JMP_SYS_INT130; break;
+ case Ijk_Sys_int145: trcval = VEX_TRC_JMP_SYS_INT145; break;
+ case Ijk_Sys_int210: trcval = VEX_TRC_JMP_SYS_INT210; break;
case Ijk_Sys_sysenter: trcval = VEX_TRC_JMP_SYS_SYSENTER; break;
case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break;
case Ijk_EmWarn: trcval = VEX_TRC_JMP_EMWARN; break;
case Ijk_Sys_int128:
case Ijk_Sys_int129:
case Ijk_Sys_int130:
+ case Ijk_Sys_int145:
+ case Ijk_Sys_int210:
case Ijk_Sys_syscall:
case Ijk_Sys_sysenter:
case Ijk_InvalICache:
case Ijk_Sys_int128:
case Ijk_Sys_int129:
case Ijk_Sys_int130:
+ case Ijk_Sys_int145:
+ case Ijk_Sys_int210:
case Ijk_Sys_syscall:
case Ijk_Sys_sysenter:
case Ijk_InvalICache:
case Ijk_Sys_int128: vex_printf("Sys_int128"); break;
case Ijk_Sys_int129: vex_printf("Sys_int129"); break;
case Ijk_Sys_int130: vex_printf("Sys_int130"); break;
+ case Ijk_Sys_int145: vex_printf("Sys_int145"); break;
+ case Ijk_Sys_int210: vex_printf("Sys_int210"); break;
case Ijk_Sys_sysenter: vex_printf("Sys_sysenter"); break;
default: vpanic("ppIRJumpKind");
}
guest_amd64_assume_fs_is_const
guest is amd64-linux ==> True
guest is amd64-darwin ==> False
+ guest is amd64-solaris ==> True
guest is other ==> inapplicable
guest_amd64_assume_gs_is_const
guest is amd64-darwin ==> True
guest is amd64-linux ==> True
+ guest is amd64-solaris ==> False
guest is other ==> inapplicable
guest_ppc_zap_RZ_at_blr
/* AMD64 GUESTS only: should we translate %fs-prefixed
instructions using the assumption that %fs always contains
- the same value? (typically zero on linux) */
+ the same value? (typically zero on linux and solaris) */
Bool guest_amd64_assume_fs_is_const;
/* AMD64 GUESTS only: should we translate %gs-prefixed
#define __LIBVEX_PUB_GUEST_AMD64_H
#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
/*---------------------------------------------------------------*/
all the old x87 FPU gunk
segment registers */
- /* HACK to e.g. make tls on amd64-linux work. %fs only ever seems to
- hold a constant value (zero on linux main thread, 0x63 in other
+ /* HACK to e.g. make tls on amd64-linux/solaris work. %fs only ever seems
+ to hold a constant value (zero on linux main thread, 0x63 in other
threads), and so guest_FS_CONST holds
the 64-bit offset associated with this constant %fs value. */
/* 200 */ ULong guest_FS_CONST;
%gs only ever seems to hold a constant value (e.g. 0x60 on darwin,
0x6b on linux), and so guest_GS_CONST holds the 64-bit offset
associated with this constant %gs value. (A direct analogue
- of the %fs-const hack for amd64-linux). */
+ of the %fs-const hack for amd64-linux/solaris). */
ULong guest_GS_CONST;
/* Needed for Darwin (but mandated for all guest architectures):
extern
ULong LibVEX_GuestAMD64_get_rflags ( /*IN*/const VexGuestAMD64State* vex_state );
+/* Put rflags into the given state. */
+extern
+void LibVEX_GuestAMD64_put_rflags ( ULong rflags,
+ /*MOD*/VexGuestAMD64State* vex_state );
+
/* Set the carry flag in the given state to 'new_carry_flag', which
should be zero or one. */
extern
LibVEX_GuestAMD64_put_rflag_c ( ULong new_carry_flag,
/*MOD*/VexGuestAMD64State* vex_state );
+/* Do FXSAVE from the supplied VexGuestAMD64State structure and store the
+   result at the given address, which must point to a buffer of at least
+   416 bytes.  Also saves the XMM registers. */
+extern
+void LibVEX_GuestAMD64_fxsave ( /*IN*/VexGuestAMD64State* gst,
+ /*OUT*/HWord fp_state );
+
+/* Do FXRSTOR from the supplied address and write the values read into the
+   given VexGuestAMD64State structure.  Also restores the XMM registers. */
+extern
+VexEmNote LibVEX_GuestAMD64_fxrstor ( /*IN*/HWord fp_state,
+ /*MOD*/VexGuestAMD64State* gst );
#endif /* ndef __LIBVEX_PUB_GUEST_AMD64_H */
#define __LIBVEX_PUB_GUEST_X86_H
#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
/*---------------------------------------------------------------*/
extern
UInt LibVEX_GuestX86_get_eflags ( /*IN*/const VexGuestX86State* vex_state );
+/* Put eflags into the given state. */
+extern
+void LibVEX_GuestX86_put_eflags ( UInt eflags,
+ /*MOD*/VexGuestX86State* vex_state );
+
/* Set the carry flag in the given state to 'new_carry_flag', which
should be zero or one. */
extern
LibVEX_GuestX86_put_eflag_c ( UInt new_carry_flag,
/*MOD*/VexGuestX86State* vex_state );
+/* Do x87 save from the supplied VexGuestX86State structure and store the
+   result at the given address, which must point to a buffer of at least
+   108 bytes. */
+extern
+void LibVEX_GuestX86_get_x87 ( /*IN*/VexGuestX86State* vex_state,
+ /*OUT*/UChar* x87_state );
+
+/* Do x87 restore from the supplied address and write the values read into
+   the given VexGuestX86State structure. */
+extern
+VexEmNote LibVEX_GuestX86_put_x87 ( /*IN*/UChar* x87_state,
+ /*MOD*/VexGuestX86State* vex_state);
+
+/* Return mxcsr from the supplied VexGuestX86State structure. */
+extern
+UInt LibVEX_GuestX86_get_mxcsr ( /*IN*/VexGuestX86State* vex_state );
+
+/* Modify the given VexGuestX86State structure according to the passed mxcsr
+ value. */
+extern
+VexEmNote LibVEX_GuestX86_put_mxcsr ( /*IN*/UInt mxcsr,
+ /*MOD*/VexGuestX86State* vex_state);
+
#endif /* ndef __LIBVEX_PUB_GUEST_X86_H */
/*---------------------------------------------------------------*/
Ijk_Sys_int128, /* amd64/x86 'int $0x80' */
Ijk_Sys_int129, /* amd64/x86 'int $0x81' */
Ijk_Sys_int130, /* amd64/x86 'int $0x82' */
+ Ijk_Sys_int145, /* amd64/x86 'int $0x91' */
+ Ijk_Sys_int210, /* amd64/x86 'int $0xD2' */
Ijk_Sys_sysenter /* x86 'sysenter'. guest_EIP becomes
invalid at the point this happens. */
}
#define VEX_TRC_JMP_SYS_INT128 77 /* do syscall before continuing */
#define VEX_TRC_JMP_SYS_INT129 89 /* do syscall before continuing */
#define VEX_TRC_JMP_SYS_INT130 91 /* do syscall before continuing */
+#define VEX_TRC_JMP_SYS_INT145 111 /* do syscall before continuing */
+#define VEX_TRC_JMP_SYS_INT210 113 /* do syscall before continuing */
#define VEX_TRC_JMP_SYS_SYSENTER 79 /* do syscall before continuing */