vex_state->guest_LO = 0; /* Multiply and divide register lower result */
/* FPU Registers */
- vex_state->guest_f0 = 0xffffffffffffffff; /* Floting point registers */
- vex_state->guest_f1 = 0xffffffffffffffff;
- vex_state->guest_f2 = 0xffffffffffffffff;
- vex_state->guest_f3 = 0xffffffffffffffff;
- vex_state->guest_f4 = 0xffffffffffffffff;
- vex_state->guest_f5 = 0xffffffffffffffff;
- vex_state->guest_f6 = 0xffffffffffffffff;
- vex_state->guest_f7 = 0xffffffffffffffff;
- vex_state->guest_f8 = 0xffffffffffffffff;
- vex_state->guest_f9 = 0xffffffffffffffff;
- vex_state->guest_f10 = 0xffffffffffffffff;
- vex_state->guest_f11 = 0xffffffffffffffff;
- vex_state->guest_f12 = 0xffffffffffffffff;
- vex_state->guest_f13 = 0xffffffffffffffff;
- vex_state->guest_f14 = 0xffffffffffffffff;
- vex_state->guest_f15 = 0xffffffffffffffff;
- vex_state->guest_f16 = 0xffffffffffffffff;
- vex_state->guest_f17 = 0xffffffffffffffff;
- vex_state->guest_f18 = 0xffffffffffffffff;
- vex_state->guest_f19 = 0xffffffffffffffff;
- vex_state->guest_f20 = 0xffffffffffffffff;
- vex_state->guest_f21 = 0xffffffffffffffff;
- vex_state->guest_f22 = 0xffffffffffffffff;
- vex_state->guest_f23 = 0xffffffffffffffff;
- vex_state->guest_f24 = 0xffffffffffffffff;
- vex_state->guest_f25 = 0xffffffffffffffff;
- vex_state->guest_f26 = 0xffffffffffffffff;
- vex_state->guest_f27 = 0xffffffffffffffff;
- vex_state->guest_f28 = 0xffffffffffffffff;
- vex_state->guest_f29 = 0xffffffffffffffff;
- vex_state->guest_f30 = 0xffffffffffffffff;
- vex_state->guest_f31 = 0xffffffffffffffff;
+ vex_state->guest_f0 = 0xffffffffffffffffULL; /* Floating point registers */
+ vex_state->guest_f1 = 0xffffffffffffffffULL;
+ vex_state->guest_f2 = 0xffffffffffffffffULL;
+ vex_state->guest_f3 = 0xffffffffffffffffULL;
+ vex_state->guest_f4 = 0xffffffffffffffffULL;
+ vex_state->guest_f5 = 0xffffffffffffffffULL;
+ vex_state->guest_f6 = 0xffffffffffffffffULL;
+ vex_state->guest_f7 = 0xffffffffffffffffULL;
+ vex_state->guest_f8 = 0xffffffffffffffffULL;
+ vex_state->guest_f9 = 0xffffffffffffffffULL;
+ vex_state->guest_f10 = 0xffffffffffffffffULL;
+ vex_state->guest_f11 = 0xffffffffffffffffULL;
+ vex_state->guest_f12 = 0xffffffffffffffffULL;
+ vex_state->guest_f13 = 0xffffffffffffffffULL;
+ vex_state->guest_f14 = 0xffffffffffffffffULL;
+ vex_state->guest_f15 = 0xffffffffffffffffULL;
+ vex_state->guest_f16 = 0xffffffffffffffffULL;
+ vex_state->guest_f17 = 0xffffffffffffffffULL;
+ vex_state->guest_f18 = 0xffffffffffffffffULL;
+ vex_state->guest_f19 = 0xffffffffffffffffULL;
+ vex_state->guest_f20 = 0xffffffffffffffffULL;
+ vex_state->guest_f21 = 0xffffffffffffffffULL;
+ vex_state->guest_f22 = 0xffffffffffffffffULL;
+ vex_state->guest_f23 = 0xffffffffffffffffULL;
+ vex_state->guest_f24 = 0xffffffffffffffffULL;
+ vex_state->guest_f25 = 0xffffffffffffffffULL;
+ vex_state->guest_f26 = 0xffffffffffffffffULL;
+ vex_state->guest_f27 = 0xffffffffffffffffULL;
+ vex_state->guest_f28 = 0xffffffffffffffffULL;
+ vex_state->guest_f29 = 0xffffffffffffffffULL;
+ vex_state->guest_f30 = 0xffffffffffffffffULL;
+ vex_state->guest_f31 = 0xffffffffffffffffULL;
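+ /* The ULL suffix pins each of these constants to unsigned long long;
+    unsuffixed, a value this wide can draw "integer constant is too
+    large" warnings on hosts where long is 32 bits, which is presumably
+    what this change silences. */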
vex_state->guest_FIR = 0; /* FP implementation and revision register */
vex_state->guest_FCCR = 0; /* FP condition codes register */
#define LWX_SWX_PATTERN64 \
t2 = newTemp(Ity_I64); \
- assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFC))); \
+ assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFCULL))); \
t4 = newTemp(Ity_I32); \
assign(t4, mkNarrowTo32( ty, binop(Iop_And64, \
mkexpr(t1), mkU64(0x3))));
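+ /* This pattern splits the unaligned address in t1 into an aligned base
+    (t2, low two bits cleared) and the byte offset within the word (t4,
+    low two bits kept); the _1 variant below does the same at doubleword
+    granularity with the low three bits. */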
#define LWX_SWX_PATTERN64_1 \
t2 = newTemp(Ity_I64); \
- assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8))); \
+ assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8ULL))); \
t4 = newTemp(Ity_I64); \
assign(t4, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));
VexAbiInfo* abiinfo,
Bool sigill_diag )
{
- IRTemp t0, t1, t2, t3, t4, t5, t6, t7, t8;
+ IRTemp t0, t1 = 0, t2, t3, t4, t5, t6, t7, t8;
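+ /* Only t1 gets an initialiser, presumably to quiet a
+    may-be-used-uninitialised warning on some code paths. */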
UInt opcode, cins, rs, rt, rd, sa, ft, fs, fd, fmt, tf, nd, function,
trap_code, imm, instr_index, p, msb, lsb, size, rot, sel;
if (mode64) {
putIReg(31, mkU64(guest_PC_curr_instr + 8));
t0 = newTemp(ty);
- assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000) |
+ assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000ULL) |
(instr_index << 2)));
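+ /* J-type target: the upper bits of the delay-slot PC are kept and the
+    low 28 bits are replaced by instr_index << 2. */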
} else {
putIReg(31, mkU32(guest_PC_curr_instr + 8));
DIP("j 0x%x", instr_index);
t0 = newTemp(ty);
if (mode64)
- assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000) |
+ assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000ULL) |
(instr_index << 2)));
else
assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
- assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8)));
+ assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8ULL)));
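+ /* The low three bits of rs + rt are masked off, so the doubleword from
+    fs is always stored to an aligned address (SUXC1-style semantics). */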
store(mkexpr(t1), getFReg(fs));
break;
assign(t5, binop(Iop_Mul64, binop(Iop_Sub64, mkU64(0x7), mkexpr(t4)),
mkU64(0x8)));
- assign(t6, binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFF),
+ assign(t6, binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFFULL),
narrowTo(Ity_I8, mkexpr(t5))));
- assign(t7, binop(Iop_Xor64, mkU64(0xFFFFFFFFFFFFFFFF), mkexpr(t6)));
+ assign(t7, binop(Iop_Xor64, mkU64(0xFFFFFFFFFFFFFFFFULL), mkexpr(t6)));
assign(t8, binop(Iop_And64, load(Ity_I64, mkexpr(t2)), mkexpr(t7)));
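+ /* t8 keeps the memory bytes outside the lanes being stored (mask t7);
+    OR-ing t3 back in merges the new bytes without disturbing the rest
+    of the doubleword. */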
store(mkexpr(t2), binop(Iop_Or64, mkexpr(t8), mkexpr(t3)));
break;
/* word content - adjusted */
t5 = newTemp(Ity_I64);
assign(t5, binop(Iop_And64, load(Ity_I64, mkexpr(t2)), unop(Iop_Not64,
- binop(Iop_Shl64, mkU64(0xFFFFFFFFFFFFFFFF),
+ binop(Iop_Shl64, mkU64(0xFFFFFFFFFFFFFFFFULL),
narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3)))))));
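+ /* t5 holds the memory bytes below the store offset; t5 and t3 should
+    occupy disjoint byte lanes, so the Xor acts as a plain merge. */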
store(mkexpr(t2), binop(Iop_Xor64, mkexpr(t5), mkexpr(t3)));
DIP("dclo r%d, r%d", rd, rs);
t1 = newTemp(Ity_I1);
assign(t1, binop(Iop_CmpEQ64, getIReg(rs),
- mkU64(0xffffffffffffffff)));
+ mkU64(0xffffffffffffffffULL)));
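+ /* DCLO counts leading ones: an all-ones source is special-cased to 64,
+    otherwise the operand is complemented and Clz64 applied. */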
putIReg(rd, IRExpr_ITE(mkexpr(t1),
mkU64(0x40),
unop(Iop_Clz64, unop(Iop_Not64,
t3 = newTemp(Ity_I64);
assign(t1, binop(Iop_And64, getIReg(rt), /* hi */
- mkU64(0xFFFFFFFF00000000)));
+ mkU64(0xFFFFFFFF00000000ULL)));
assign(t2, binop(Iop_Sar64, mkexpr(t1), mkU8(sa)));
mkNarrowTo32(ty, getIReg(rs)), mkU32(0x0000001F))));
assign(t1, binop(Iop_And64, getIReg(rt), /* hi */
- mkU64(0xFFFFFFFF00000000)));
+ mkU64(0xFFFFFFFF00000000ULL)));
assign(t2, binop(Iop_Sar64, mkexpr(t1), mkexpr(t4)));
assign(t2, unop(Iop_1Uto64,
binop(Iop_CmpEQ64,
binop(Iop_And64, mkexpr(t1),
- mkU64(0x8000000000000000)),
- mkU64(0x8000000000000000))));
+ mkU64(0x8000000000000000ULL)),
+ mkU64(0x8000000000000000ULL))));
assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
assign(t4, unop(Iop_1Uto64,
binop(Iop_CmpNE64,
binop(Iop_And64, mkexpr(t3),
- mkU64(0x8000000000000000)),
- mkU64(0x8000000000000000))));
+ mkU64(0x8000000000000000ULL)),
+ mkU64(0x8000000000000000ULL))));
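+ /* Standard signed-overflow test: t2 is zero when the operands share a
+    sign (t1 being their xor) and t4 is zero when the result's sign
+    differs from the first operand's, so t2 | t4 == 0 is exactly the
+    overflow case trapped below. */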
stmt(IRStmt_Exit(binop(Iop_CmpEQ64,
binop(Iop_Or64, mkexpr(t2), mkexpr(t4)),
/* dst = src0 + (-1 * src1)
   if (sign(src0) != sign(-1 * src1))
   goto no overflow;
   if (sign(dst) == sign(src0))
   goto no overflow;
   we have overflow! */
- assign(t5, binop(Iop_Mul64, mkexpr(tmpRt64), mkU64(0xffffffffffffffff)));
+ assign(t5, binop(Iop_Mul64,
+ mkexpr(tmpRt64),
+ mkU64(0xffffffffffffffffULL)));
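+ /* Multiplying by 0xffffffffffffffffULL, i.e. -1 in two's complement,
+    negates rt, turning the subtraction into rs + (-rt). */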
assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(t5)));
assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(t5)));
- assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64,
- mkexpr(t1), mkU64(0x8000000000000000)),
- mkU64(0x8000000000000000))));
+ assign(t2, unop(Iop_1Sto64,
+ binop(Iop_CmpEQ64,
+ binop(Iop_And64,
+ mkexpr(t1),
+ mkU64(0x8000000000000000ULL)),
+ mkU64(0x8000000000000000ULL))));
assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
- assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64,
- mkexpr(t3), mkU64(0x8000000000000000)),
- mkU64(0x8000000000000000))));
+ assign(t4, unop(Iop_1Sto64,
+ binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ mkexpr(t3),
+ mkU64(0x8000000000000000ULL)),
+ mkU64(0x8000000000000000ULL))));
stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2),
mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf,
case 0x02: /* BLTZL */
DIP("bltzl r%d, %d", rs, imm);
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
- binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
- mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
- mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
- imm);
+ binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+ mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+ mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+ imm);
break;
case 0x03: /* BGEZL */
DIP("bgezl r%d, %d", rs, imm);
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
- binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
- mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
- mode64 ? mkU64(0x0) : mkU32(0x0)), imm);
+ binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+ mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+ mode64 ? mkU64(0x0) : mkU32(0x0)), imm);
break;
case 0x12: /* BLTZALL */
putIReg(31, mode64 ? mkU64(guest_PC_curr_instr + 8) :
mkU32(guest_PC_curr_instr + 8));
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
- binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
- mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
- mode64 ? mkU64(0x8000000000000000) : mkU32(0x80000000)),
- imm);
+ binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+ mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+ mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+ imm);
break;
case 0x13: /* BGEZALL */
DIP("bgezall r%d, %d", rs, imm);
if (mode64) {
putIReg(31, mkU64(guest_PC_curr_instr + 8));
- lastn = dis_branch_likely(binop(Iop_CmpNE64, binop(Iop_And64,
- getIReg(rs), mkU64(0x8000000000000000)),
- mkU64(0x0)), imm);
+ lastn = dis_branch_likely(binop(Iop_CmpNE64,
+ binop(Iop_And64,
+ getIReg(rs),
+ mkU64(0x8000000000000000ULL)),
+ mkU64(0x0)),
+ imm);
} else {
putIReg(31, mkU32(guest_PC_curr_instr + 8));
lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64),
mkU64(extend_s_16to64(imm))));
assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64,
- mkexpr(t1), mkU64(0x8000000000000000)),
- mkU64(0x8000000000000000))));
+ mkexpr(t1), mkU64(0x8000000000000000ULL)),
+ mkU64(0x8000000000000000ULL))));
assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64,
- mkexpr(t3), mkU64(0x8000000000000000)),
- mkU64(0x8000000000000000))));
+ mkexpr(t3), mkU64(0x8000000000000000ULL)),
+ mkU64(0x8000000000000000ULL))));
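+ /* Same sign-based overflow test as above, with the sign-extended
+    immediate standing in for the second register operand. */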
stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2),
mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf,
assign(t5, binop(Iop_Mul64, mkexpr(t4), mkU64(0x8)));
- assign(t6, binop(Iop_Shr64, mkU64(0x00FFFFFFFFFFFFFF),
+ assign(t6, binop(Iop_Shr64, mkU64(0x00FFFFFFFFFFFFFFULL),
narrowTo(Ity_I8, mkexpr(t5))));
assign(t7, binop(Iop_And64, getIReg(rt), mkexpr(t6)));
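+ /* t6 selects the register bytes the unaligned access must preserve
+    (the offset in t4 scaled to a bit count in t5); t7 snapshots those
+    bytes from rt before the loaded ones are merged in. */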
/* rt content - adjusted */
t5 = newTemp(Ity_I64);
assign(t5, binop(Iop_And64, getIReg(rt), unop(Iop_Not64,
- binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFF),
+ binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFFULL),
narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3)))))));
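+ /* The mask clears exactly the byte lanes being replaced, so the Or in
+    the putIReg below combines rt's surviving bytes (t5) with the loaded
+    ones (t3). */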
putIReg(rt, binop(Iop_Or64, mkexpr(t5), mkexpr(t3)));