Fix printf-style format specifiers whose signedness does not match their arguments (-Wformat-signedness).
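
Adjust format specifiers in the amd64, arm, arm64 and mips front ends so
that they match the signedness of their arguments:

* register numbers, immediates and opcode fields held in UInt/UChar
  variables are printed with %u/%x, dropping old (Int) casts and adding
  (UInt) casts where a narrower-than-int expression is passed;
* values printed with %llx (jump targets, absolute addresses) gain a
  (ULong) cast, since %llx expects an unsigned argument;
* expressions such as "8 << size" move from %u to %d, because a shift
  result takes the type of its promoted left operand, which here is int;
* signed displacements (simm9, sImmsa) keep %lld, with a (Long) cast
  where the value lives in an unsigned variable.

A minimal illustration of the warning classes involved (plain C, with
printf standing in for vex_printf and standard types standing in for
VEX's UInt/Long; not code from the tree):

    #include <stdio.h>

    int main(void)
    {
       unsigned int reg   = 5;     /* a UInt register number           */
       unsigned int size  = 1;
       long long    simm9 = -16;   /* a signed displacement            */

       printf("r%d\n", reg);       /* warns: %d with an unsigned arg   */
       printf("r%u\n", reg);       /* fixed                            */

       printf("i%u\n", 8 << size); /* warns: "8 << size" has type int  */
       printf("i%d\n", 8 << size); /* fixed                            */

       printf("#%llu\n", simm9);   /* wrong fix: -16 prints as a huge
                                      unsigned value                   */
       printf("#%lld\n", simm9);   /* right: keep the signed specifier */
       return 0;
    }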
git-svn-id: svn://svn.valgrind.org/vex/trunk@3167
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xD8\n");
goto decode_fail;
}
/* Dunno if this is right */
case 0xD0 ... 0xD7: /* FCOM %st(?),%st(0) */
r_dst = (UInt)modrm - 0xD0;
- DIP("fcom %%st(0),%%st(%d)\n", r_dst);
+ DIP("fcom %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
unop(Iop_32Uto64,
/* Dunno if this is right */
case 0xD8 ... 0xDF: /* FCOMP %st(?),%st(0) */
r_dst = (UInt)modrm - 0xD8;
- DIP("fcomp %%st(0),%%st(%d)\n", r_dst);
+ DIP("fcomp %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
unop(Iop_32Uto64,
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xD9\n");
goto decode_fail;
}
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDA\n");
goto decode_fail;
}
}
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDB\n");
goto decode_fail;
}
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDC\n");
goto decode_fail;
}
}
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDD\n");
goto decode_fail;
}
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDE\n");
goto decode_fail;
}
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n",
+ (UInt)gregLO3ofRM(modrm));
vex_printf("first_opcode == 0xDF\n");
goto decode_fail;
}
case 0xFB: op = Iop_Sub64; break;
default:
- vex_printf("\n0x%x\n", (Int)opc);
+ vex_printf("\n0x%x\n", (UInt)opc);
vpanic("dis_MMXop_regmem_to_reg");
}
assign( plain, binop(op, getXMMReg(gregOfRexRM(pfx,rm)),
getXMMReg(eregOfRexRM(pfx,rm))) );
delta += 2;
- DIP("%s $%d,%s,%s\n", opname,
- (Int)imm8,
+ DIP("%s $%u,%s,%s\n", opname,
+ imm8,
nameXMMReg(eregOfRexRM(pfx,rm)),
nameXMMReg(gregOfRexRM(pfx,rm)) );
} else {
)
);
delta += alen+1;
- DIP("%s $%d,%s,%s\n", opname,
- (Int)imm8,
+ DIP("%s $%u,%s,%s\n", opname,
+ imm8,
dis_buf,
nameXMMReg(gregOfRexRM(pfx,rm)) );
}
assign(sV, getXMMReg(rE));
imm8 = getUChar(delta+1) & 7;
delta += 1+1;
- DIP("%spextrw $%d,%s,%s\n", isAvx ? "v" : "",
- (Int)imm8, nameXMMReg(rE), nameIReg32(rG));
+ DIP("%spextrw $%u,%s,%s\n", isAvx ? "v" : "",
+ imm8, nameXMMReg(rE), nameIReg32(rG));
} else {
/* The memory case is disallowed, apparently. */
return deltaIN; /* FAIL */
assign(t4, getIReg16(eregOfRexRM(pfx,modrm)));
delta += 1+1;
lane = getUChar(delta-1);
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
nameIReg16(eregOfRexRM(pfx,modrm)),
nameMMXReg(gregLO3ofRM(modrm)));
} else {
delta += 1+alen;
lane = getUChar(delta-1);
assign(t4, loadLE(Ity_I16, mkexpr(addr)));
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
dis_buf,
nameMMXReg(gregLO3ofRM(modrm)));
}
delta += 1+1;
lane = getUChar(delta-1);
DIP("pinsrw $%d,%s,%s\n",
- (Int)lane, nameIReg16(rE), nameXMMReg(rG));
+ lane, nameIReg16(rE), nameXMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf,
1/*byte after the amode*/ );
lane = getUChar(delta-1);
assign(t4, loadLE(Ity_I16, mkexpr(addr)));
DIP("pinsrw $%d,%s,%s\n",
- (Int)lane, dis_buf, nameXMMReg(rG));
+ lane, dis_buf, nameXMMReg(rG));
}
IRTemp src_vec = newTemp(Ity_V128);
assign(src_vec, getXMMReg(rG));
assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
d64 = (Long)getUChar(delta+1);
delta += 1+1;
- DIP("palignr $%d,%s,%s\n", (Int)d64,
+ DIP("palignr $%lld,%s,%s\n", d64,
nameXMMReg(eregOfRexRM(pfx,modrm)),
nameXMMReg(gregOfRexRM(pfx,modrm)));
} else {
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
d64 = (Long)getUChar(delta+alen);
delta += alen+1;
- DIP("palignr $%d,%s,%s\n", (Int)d64,
+ DIP("palignr $%lld,%s,%s\n", d64,
dis_buf,
nameXMMReg(gregOfRexRM(pfx,modrm)));
}
assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
d64 = (Long)getUChar(delta+1);
delta += 1+1;
- DIP("palignr $%d,%s,%s\n", (Int)d64,
+ DIP("palignr $%lld,%s,%s\n", d64,
nameMMXReg(eregLO3ofRM(modrm)),
nameMMXReg(gregLO3ofRM(modrm)));
} else {
assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
d64 = (Long)getUChar(delta+alen);
delta += alen+1;
- DIP("palignr $%d%s,%s\n", (Int)d64,
+ DIP("palignr $%lld%s,%s\n", d64,
dis_buf,
nameMMXReg(gregLO3ofRM(modrm)));
}
guest_RIP_bbstart+delta, d64 );
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("j%s-8 0x%llx %s\n", name_AMD64Condcode(opc - 0x70), d64, comment);
+ DIP("j%s-8 0x%llx %s\n", name_AMD64Condcode(opc - 0x70), (ULong)d64,
+ comment);
return delta;
}
assign( addr, handleAddrOverrides(vbi, pfx, mkU64(d64)) );
putIRegRAX(sz, loadLE( ty, mkexpr(addr) ));
DIP("mov%c %s0x%llx, %s\n", nameISize(sz),
- segRegTxt(pfx), d64,
+ segRegTxt(pfx), (ULong)d64,
nameIRegRAX(sz));
return delta;
assign( addr, handleAddrOverrides(vbi, pfx, mkU64(d64)) );
storeLE( mkexpr(addr), getIRegRAX(sz) );
DIP("mov%c %s, %s0x%llx\n", nameISize(sz), nameIRegRAX(sz),
- segRegTxt(pfx), d64);
+ segRegTxt(pfx), (ULong)d64);
return delta;
case 0xA4:
}
stmt( IRStmt_Exit(cond, Ijk_Boring, IRConst_U64(d64), OFFB_RIP) );
- DIP("loop%s%s 0x%llx\n", xtra, haveASO(pfx) ? "l" : "", d64);
+ DIP("loop%s%s 0x%llx\n", xtra, haveASO(pfx) ? "l" : "", (ULong)d64);
return delta;
}
IRConst_U64(d64),
OFFB_RIP
));
- DIP("jecxz 0x%llx\n", d64);
+ DIP("jecxz 0x%llx\n", (ULong)d64);
} else {
/* 64-bit */
stmt( IRStmt_Exit( binop(Iop_CmpEQ64,
IRConst_U64(d64),
OFFB_RIP
));
- DIP("jrcxz 0x%llx\n", d64);
+ DIP("jrcxz 0x%llx\n", (ULong)d64);
}
return delta;
jmp_lit(dres, Ijk_Call, d64);
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("call 0x%llx\n",d64);
+ DIP("call 0x%llx\n", (ULong)d64);
return delta;
case 0xE9: /* Jv (jump, 16/32 offset) */
jmp_lit(dres, Ijk_Boring, d64);
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("jmp 0x%llx\n", d64);
+ DIP("jmp 0x%llx\n", (ULong)d64);
return delta;
case 0xEB: /* Jb (jump, byte offset) */
jmp_lit(dres, Ijk_Boring, d64);
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("jmp-8 0x%llx\n", d64);
+ DIP("jmp-8 0x%llx\n", (ULong)d64);
return delta;
case 0xF5: /* CMC */
guest_RIP_bbstart+delta, d64 );
vassert(dres->whatNext == Dis_StopHere);
}
- DIP("j%s-32 0x%llx %s\n", name_AMD64Condcode(opc - 0x80), d64, comment);
+ DIP("j%s-32 0x%llx %s\n", name_AMD64Condcode(opc - 0x80), (ULong)d64,
+ comment);
return delta;
}
UInt rE = eregOfRexRM(pfx,rm);
assign(argR, getXMMReg(rE));
delta += 1+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8,
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8,
nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
: sz == 8 ? unop( Iop_64UtoV128, loadLE(Ity_I64, mkexpr(addr)))
: /*sz==4*/ unop( Iop_32UtoV128, loadLE(Ity_I32, mkexpr(addr))));
delta += alen+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
}
assign(plain, preSwap ? binop(op, mkexpr(argR), mkexpr(argL))
UInt rE = eregOfRexRM(pfx,rm);
assign(argR, getYMMReg(rE));
delta += 1+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8,
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8,
nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
} else {
addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
if (!ok) return deltaIN; /* FAIL */
assign(argR, loadLE(Ity_V256, mkexpr(addr)) );
delta += alen+1;
- DIP("%s $%d,%s,%s,%s\n",
- opname, (Int)imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+ DIP("%s $%u,%s,%s,%s\n",
+ opname, imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
}
breakupV256toV128s( preSwap ? argR : argL, &argLhi, &argLlo );
assign( sV, getXMMReg(rE) );
imm8 = getUChar(delta+1);
delta += 1+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, nameXMMReg(rE),
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameXMMReg(rE),
nameXMMReg(rV), nameXMMReg(rG));
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
imm8 = getUChar(delta+alen);
delta += alen+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, dis_buf,
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
nameXMMReg(rV), nameXMMReg(rG));
}
assign( sV, getYMMReg(rE) );
imm8 = getUChar(delta+1);
delta += 1+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, nameYMMReg(rE),
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, nameYMMReg(rE),
nameYMMReg(rV), nameYMMReg(rG));
} else {
addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
imm8 = getUChar(delta+alen);
delta += alen+1;
- DIP("vpalignr $%d,%s,%s,%s\n", imm8, dis_buf,
+ DIP("vpalignr $%u,%s,%s,%s\n", imm8, dis_buf,
nameYMMReg(rV), nameYMMReg(rG));
}
if (sigill_diag) {
vex_printf("vex amd64->IR: unhandled instruction bytes: "
"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- (Int)getUChar(delta_start+0),
- (Int)getUChar(delta_start+1),
- (Int)getUChar(delta_start+2),
- (Int)getUChar(delta_start+3),
- (Int)getUChar(delta_start+4),
- (Int)getUChar(delta_start+5),
- (Int)getUChar(delta_start+6),
- (Int)getUChar(delta_start+7) );
+ getUChar(delta_start+0),
+ getUChar(delta_start+1),
+ getUChar(delta_start+2),
+ getUChar(delta_start+3),
+ getUChar(delta_start+4),
+ getUChar(delta_start+5),
+ getUChar(delta_start+6),
+ getUChar(delta_start+7) );
vex_printf("vex amd64->IR: REX=%d REX.W=%d REX.R=%d REX.X=%d REX.B=%d\n",
haveREX(pfx) ? 1 : 0, getRexW(pfx), getRexR(pfx),
getRexX(pfx), getRexB(pfx));
vassert(0);
}
putIReg64orSP(nn, mkexpr(tEA));
DIP(atRN ? "ldrs%c %s, [%s], #%lld\n" : "ldrs%c %s, [%s, #%lld]!",
- ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), simm9);
+ ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), (Long)simm9);
return True;
}
vassert(0);
}
DIP("ldurs%c %s, [%s, #%lld]",
- ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), simm9);
+ ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), (Long)simm9);
return True;
}
/* else fall through */
putIReg64orSP(nn, mkexpr(tEA));
DIP(atRN ? "%s %s, [%s], #%lld\n" : "%s %s, [%s, #%lld]!\n",
isLD ? "ldr" : "str",
- nameQRegLO(tt, ty), nameIReg64orSP(nn), simm9);
+ nameQRegLO(tt, ty), nameIReg64orSP(nn), (Long)simm9);
return True;
}
putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
const HChar* Ta = bitQ ==1 ? "16b" : "8b";
const HChar* nm = isTBX ? "tbx" : "tbl";
- DIP("%s %s.%s, {v%d.16b .. v%d.16b}, %s.%s\n",
+ DIP("%s %s.%s, {v%u.16b .. v%u.16b}, %s.%s\n",
nm, nameQReg128(dd), Ta, nn, (nn + len) % 32, nameQReg128(mm), Ta);
return True;
}
: (ks == 1 ? "sqdmlal" : "sqdmlsl");
const HChar arrNarrow = "bhsd"[size];
const HChar arrWide = "bhsd"[size+1];
- DIP("%s %c%d, %c%d, %c%d\n",
+ DIP("%s %c%u, %c%u, %c%u\n",
nm, arrWide, dd, arrNarrow, nn, arrNarrow, mm);
return True;
}
math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(sat1n)));
const HChar arr = "bhsd"[size];
const HChar* nm = isR ? "sqrdmulh" : "sqdmulh";
- DIP("%s %c%d, %c%d, %c%d\n", nm, arr, dd, arr, nn, arr, mm);
+ DIP("%s %c%u, %c%u, %c%u\n", nm, arr, dd, arr, nn, arr, mm);
return True;
}
: (ks == 1 ? "sqdmlal" : "sqdmlsl");
const HChar arrNarrow = "bhsd"[size];
const HChar arrWide = "bhsd"[size+1];
- DIP("%s %c%d, %c%d, v%d.%c[%u]\n",
+ DIP("%s %c%u, %c%u, v%u.%c[%u]\n",
nm, arrWide, dd, arrNarrow, nn, dd, arrNarrow, ix);
return True;
}
updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
const HChar* nm = isR ? "sqrdmulh" : "sqdmulh";
HChar ch = size == X01 ? 'h' : 's';
- DIP("%s %c%d, %c%d, v%d.%c[%u]\n", nm, ch, dd, ch, nn, ch, dd, ix);
+ DIP("%s %c%u, %c%u, v%d.%c[%u]\n", nm, ch, dd, ch, nn, ch, (Int)dd, ix);
return True;
}
/* */
if (res) {
putQReg128(dd, res);
- DIP("%cshll%s %s.%s, %s.%s, #%d\n",
+ DIP("%cshll%s %s.%s, %s.%s, #%u\n",
isU ? 'u' : 's', isQ ? "2" : "",
nameQReg128(dd), ta, nameQReg128(nn), tb, sh);
return True;
putQReg128(dd, mkexpr(res));
const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
const HChar* arrWide = nameArr_Q_SZ(1, size+1);
- DIP("shll%s %s.%s, %s.%s, #%u\n", is2 ? "2" : "",
+ DIP("shll%s %s.%s, %s.%s, #%d\n", is2 ? "2" : "",
nameQReg128(dd), arrWide, nameQReg128(nn), arrNarrow, 8 << size);
return True;
}
putDRegI64(dreg, triop(Iop_Slice64, /*hiI64*/getDRegI64(mreg),
/*loI64*/getDRegI64(nreg), mkU8(imm4)), condT);
}
- DIP("vext.8 %c%d, %c%d, %c%d, #%d\n", reg_t, dreg, reg_t, nreg,
+ DIP("vext.8 %c%u, %c%u, %c%u, #%u\n", reg_t, dreg, reg_t, nreg,
reg_t, mreg, imm4);
return True;
}
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vdup.%d %c%d, d%d[%d]\n", size, Q ? 'q' : 'd', dreg, mreg, index);
+ DIP("vdup.%u %c%u, d%u[%u]\n", size, Q ? 'q' : 'd', dreg, mreg, index);
return True;
}
binop(andOp, mkexpr(arg_m), imm_val),
binop(andOp, mkexpr(arg_n), imm_val)),
mkU8(1))));
- DIP("vhadd.%c%d %c%d, %c%d, %c%d\n",
+ DIP("vhadd.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size, regType,
dreg, regType, nreg, regType, mreg);
} else {
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
assign(tmp, binop(op2, mkexpr(arg_n), mkexpr(arg_m)));
setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
- DIP("vqadd.%c%d %c%d, %c%d, %c%d\n",
+ DIP("vqadd.%c%d %c%u %c%u, %c%u\n",
U ? 'u' : 's',
8 << size, reg_t, dreg, reg_t, nreg, reg_t, mreg);
}
mkU8(1))),
mkexpr(cc)));
}
- DIP("vrhadd.%c%d %c%d, %c%d, %c%d\n",
+ DIP("vrhadd.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's',
8 << size, reg_t, dreg, reg_t, nreg, reg_t, mreg);
} else {
assign(res, binop(Iop_And64, mkexpr(arg_n),
mkexpr(arg_m)));
}
- DIP("vand %c%d, %c%d, %c%d\n",
+ DIP("vand %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
break;
}
assign(res, binop(Iop_And64, mkexpr(arg_n),
unop(Iop_Not64, mkexpr(arg_m))));
}
- DIP("vbic %c%d, %c%d, %c%d\n",
+ DIP("vbic %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
break;
}
assign(res, binop(Iop_Or64, mkexpr(arg_n),
mkexpr(arg_m)));
}
- DIP("vorr %c%d, %c%d, %c%d\n",
+ DIP("vorr %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
} else {
/* VMOV */
HChar reg_t = Q ? 'q' : 'd';
assign(res, mkexpr(arg_m));
- DIP("vmov %c%d, %c%d\n", reg_t, dreg, reg_t, mreg);
+ DIP("vmov %c%u, %c%u\n", reg_t, dreg, reg_t, mreg);
}
break;
case 3:{
assign(res, binop(Iop_Or64, mkexpr(arg_n),
unop(Iop_Not64, mkexpr(arg_m))));
}
- DIP("vorn %c%d, %c%d, %c%d\n",
+ DIP("vorn %c%u, %c%u, %c%u\n",
reg_t, dreg, reg_t, nreg, reg_t, mreg);
break;
}
unop(notOp, mkexpr(arg_n)),
mkexpr(arg_m)),
imm_val)));
- DIP("vhsub.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vhsub.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
assign(tmp, binop(op2, mkexpr(arg_n), mkexpr(arg_m)));
setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
- DIP("vqsub.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vqsub.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
if (B == 0) {
/* VCGT */
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vcgt.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vcgt.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
assign(res,
unop(Q ? Iop_NotV128 : Iop_Not64,
binop(op, mkexpr(arg_m), mkexpr(arg_n))));
- DIP("vcge.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vcge.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
else
assign(res, binop(op, mkexpr(arg_m), mkexpr(tmp)));
}
- DIP("vshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
binop(Q ? Iop_AndV128 : Iop_And64,
mkexpr(arg_m), mkexpr(mask)),
Q, condT);
- DIP("vqshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vqshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
binop(op, mkexpr(arg_m), mkexpr(arg_n)),
mkexpr(round)));
}
- DIP("vrshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vrshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
binop(Q ? Iop_AndV128 : Iop_And64,
mkexpr(arg_m), mkexpr(mask)),
Q, condT);
- DIP("vqrshl.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vqrshl.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
nreg);
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vmax.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vmax.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vmin.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vmin.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
mkexpr(arg_n)),
unop(Q ? Iop_NotV128 : Iop_Not64,
mkexpr(cond)))));
- DIP("vabd.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vabd.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
unop(Q ? Iop_NotV128 : Iop_Not64,
mkexpr(cond)))));
assign(res, binop(op_add, mkexpr(acc), mkexpr(tmp)));
- DIP("vaba.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vaba.%c%d %c%u, %c%u, %c%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
case 3: op = Q ? Iop_Add64x2 : Iop_Add64; break;
default: vassert(0);
}
- DIP("vadd.i%u %c%u, %c%u, %c%u\n",
+ DIP("vadd.i%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
} else {
case 3: op = Q ? Iop_Sub64x2 : Iop_Sub64; break;
default: vassert(0);
}
- DIP("vsub.i%u %c%u, %c%u, %c%u\n",
+ DIP("vsub.i%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
}
assign(res, unop(op, binop(Q ? Iop_AndV128 : Iop_And64,
mkexpr(arg_n),
mkexpr(arg_m))));
- DIP("vtst.%u %c%u, %c%u, %c%u\n",
+ DIP("vtst.%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
} else {
binop(Q ? Iop_XorV128 : Iop_Xor64,
mkexpr(arg_n),
mkexpr(arg_m)))));
- DIP("vceq.i%u %c%u, %c%u, %c%u\n",
+ DIP("vceq.i%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
}
assign(res, binop(op2,
Q ? getQReg(dreg) : getDRegI64(dreg),
binop(op, mkexpr(arg_n), mkexpr(arg_m))));
- DIP("vml%c.i%u %c%u, %c%u, %c%u\n",
+ DIP("vml%c.i%d %c%u, %c%u, %c%u\n",
P ? 's' : 'a', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vmul.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vmul.%c%d %c%u, %c%u, %c%u\n",
P ? 'p' : 'i', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
mreg);
}
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
- DIP("vp%s.%c%u %c%u, %c%u, %c%u\n",
+ DIP("vp%s.%c%d %c%u, %c%u, %c%u\n",
P ? "min" : "max", U ? 'u' : 's',
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg,
Q ? 'q' : 'd', mreg);
Q ? mkU128(imm) : mkU64(imm))),
Q ? mkU128(0) : mkU64(0),
Q, condT);
- DIP("vqdmulh.s%u %c%u, %c%u, %c%u\n",
+ DIP("vqdmulh.s%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
} else {
Q ? mkU128(imm) : mkU64(imm))),
Q ? mkU128(0) : mkU64(0),
Q, condT);
- DIP("vqrdmulh.s%u %c%u, %c%u, %c%u\n",
+ DIP("vqrdmulh.s%d %c%u, %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd',
dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
}
assign(arg_m, unop(cvt, getDRegI64(mreg)));
putQReg(dreg, binop(op, mkexpr(arg_n), mkexpr(arg_m)),
condT);
- DIP("v%s%c.%c%u q%u, %c%u, d%u\n", (A & 2) ? "sub" : "add",
+ DIP("v%s%c.%c%d q%u, %c%u, d%u\n", (A & 2) ? "sub" : "add",
(A & 1) ? 'w' : 'l', U ? 'u' : 's', 8 << size, dreg,
(A & 1) ? 'q' : 'd', nreg, mreg);
return True;
}
putDRegI64(dreg, unop(cvt, binop(sh, mkexpr(res), mkU8(8 << size))),
condT);
- DIP("v%saddhn.i%u d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
+ DIP("v%saddhn.i%d d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
nreg, mreg);
return True;
case 5:
unop(Iop_NotV128, mkexpr(cond)))),
getQReg(dreg)));
putQReg(dreg, mkexpr(res), condT);
- DIP("vabal.%c%u q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
+ DIP("vabal.%c%d q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
nreg, mreg);
return True;
case 6:
}
putDRegI64(dreg, unop(cvt, binop(sh, mkexpr(res), mkU8(8 << size))),
condT);
- DIP("v%ssubhn.i%u d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
+ DIP("v%ssubhn.i%d d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
nreg, mreg);
return True;
case 7:
binop(op, mkexpr(arg_m), mkexpr(arg_n)),
unop(Iop_NotV128, mkexpr(cond)))));
putQReg(dreg, mkexpr(res), condT);
- DIP("vabdl.%c%u q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
+ DIP("vabdl.%c%d q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
nreg, mreg);
return True;
case 8:
res = newTemp(Ity_V128);
assign(res, binop(op, getDRegI64(nreg),getDRegI64(mreg)));
putQReg(dreg, binop(op2, getQReg(dreg), mkexpr(res)), condT);
- DIP("vml%cl.%c%u q%u, d%u, d%u\n", P ? 's' : 'a', U ? 'u' : 's',
+ DIP("vml%cl.%c%d q%u, d%u, d%u\n", P ? 's' : 'a', U ? 'u' : 's',
8 << size, dreg, nreg, mreg);
return True;
case 9:
mkU64(0),
False, condT);
putQReg(dreg, binop(add, getQReg(dreg), mkexpr(res)), condT);
- DIP("vqdml%cl.s%u q%u, d%u, d%u\n", P ? 's' : 'a', 8 << size, dreg,
+ DIP("vqdml%cl.s%d q%u, d%u, d%u\n", P ? 's' : 'a', 8 << size, dreg,
nreg, mreg);
return True;
case 12:
}
putQReg(dreg, binop(op, getDRegI64(nreg),
getDRegI64(mreg)), condT);
- DIP("vmull.%c%u q%u, d%u, d%u\n", P ? 'p' : (U ? 'u' : 's'),
+ DIP("vmull.%c%d q%u, d%u, d%u\n", P ? 'p' : (U ? 'u' : 's'),
8 << size, dreg, nreg, mreg);
return True;
case 13:
binop(op2, getDRegI64(mreg), mkU64(imm))),
mkU64(0),
False, condT);
- DIP("vqdmull.s%u q%u, d%u, d%u\n", 8 << size, dreg, nreg, mreg);
+ DIP("vqdmull.s%d q%u, d%u, d%u\n", 8 << size, dreg, nreg, mreg);
return True;
default:
return False;
else
putDRegI64(dreg, binop(op2, getDRegI64(dreg), mkexpr(res)),
condT);
- DIP("vml%c.%c%u %c%u, %c%u, d%u[%u]\n", INSN(10,10) ? 's' : 'a',
+ DIP("vml%c.%c%d %c%u, %c%u, d%u[%u]\n", INSN(10,10) ? 's' : 'a',
INSN(8,8) ? 'f' : 'i', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, mreg, index);
return True;
op2 = INSN(10,10) ? sub : add;
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
putQReg(dreg, binop(op2, getQReg(dreg), mkexpr(res)), condT);
- DIP("vml%cl.%c%u q%u, d%u, d%u[%u]\n",
+ DIP("vml%cl.%c%d q%u, d%u, d%u[%u]\n",
INSN(10,10) ? 's' : 'a', U ? 'u' : 's',
8 << size, dreg, nreg, mreg, index);
return True;
setFlag_QC(mkexpr(tmp), binop(add, getQReg(dreg), mkexpr(res)),
True, condT);
putQReg(dreg, binop(add, getQReg(dreg), mkexpr(res)), condT);
- DIP("vqdml%cl.s%u q%u, d%u, d%u[%u]\n", P ? 's' : 'a', 8 << size,
+ DIP("vqdml%cl.s%d q%u, d%u, d%u[%u]\n", P ? 's' : 'a', 8 << size,
dreg, nreg, mreg, index);
return True;
}
putQReg(dreg, mkexpr(res), condT);
else
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vmul.%c%u %c%u, %c%u, d%u[%u]\n", INSN(8,8) ? 'f' : 'i',
+ DIP("vmul.%c%d %c%u, %c%u, d%u[%u]\n", INSN(8,8) ? 'f' : 'i',
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', nreg, mreg, index);
return True;
}
assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
putQReg(dreg, mkexpr(res), condT);
- DIP("vmull.%c%u q%u, d%u, d%u[%u]\n", U ? 'u' : 's', 8 << size, dreg,
+ DIP("vmull.%c%d q%u, d%u, d%u[%u]\n", U ? 'u' : 's', 8 << size, dreg,
nreg, mreg, index);
return True;
}
binop(op2, mkexpr(arg_m), mkU64(imm))),
mkU64(0),
False, condT);
- DIP("vqdmull.s%u q%u, d%u, d%u[%u]\n", 8 << size, dreg, nreg, mreg,
+ DIP("vqdmull.s%d q%u, d%u, d%u[%u]\n", 8 << size, dreg, nreg, mreg,
index);
return True;
}
putQReg(dreg, mkexpr(res), condT);
else
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vqdmulh.s%u %c%u, %c%u, d%u[%u]\n",
+ DIP("vqdmulh.s%d %c%u, %c%u, d%u[%u]\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', nreg, mreg, index);
return True;
putQReg(dreg, mkexpr(res), condT);
else
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vqrdmulh.s%u %c%u, %c%u, d%u[%u]\n",
+ DIP("vqrdmulh.s%d %c%u, %c%u, d%u[%u]\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', nreg, mreg, index);
return True;
putDRegI64(dreg, binop(add, mkexpr(res), getDRegI64(dreg)),
condT);
}
- DIP("vrsra.%c%u %c%u, %c%u, #%u\n",
+ DIP("vrsra.%c%d %c%u, %c%u, #%u\n",
U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
} else {
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vrshr.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vrshr.%c%d %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
return True;
putDRegI64(dreg, binop(add, mkexpr(res), getDRegI64(dreg)),
condT);
}
- DIP("vsra.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vsra.%c%d %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
} else {
if (Q) {
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vshr.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vshr.%c%d %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
return True;
mkU8(shift_imm))));
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vsri.%u %c%u, %c%u, #%u\n",
+ DIP("vsri.%d %c%u, %c%u, #%u\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg, shift_imm);
return True;
mkU8(shift_imm))));
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vsli.%u %c%u, %c%u, #%u\n",
+ DIP("vsli.%d %c%u, %c%u, #%u\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg, shift_imm);
return True;
} else {
putDRegI64(dreg, mkexpr(res), condT);
}
- DIP("vshl.i%u %c%u, %c%u, #%u\n",
+ DIP("vshl.i%d %c%u, %c%u, #%u\n",
8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg, shift_imm);
return True;
default:
vassert(0);
}
- DIP("vqshl.u%u %c%u, %c%u, #%u\n",
+ DIP("vqshl.u%d %c%u, %c%u, #%u\n",
8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
} else {
default:
vassert(0);
}
- DIP("vqshlu.s%u %c%u, %c%u, #%u\n",
+ DIP("vqshlu.s%d %c%u, %c%u, #%u\n",
8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
default:
vassert(0);
}
- DIP("vqshl.s%u %c%u, %c%u, #%u\n",
+ DIP("vqshl.s%d %c%u, %c%u, #%u\n",
8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
}
mkexpr(reg_m),
mkU8(shift_imm))));
putDRegI64(dreg, mkexpr(res), condT);
- DIP("vshrn.i%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+ DIP("vshrn.i%d d%u, q%u, #%u\n", 8 << size, dreg, mreg,
shift_imm);
return True;
} else {
imm_val))));
putDRegI64(dreg, mkexpr(res), condT);
if (shift_imm == 0) {
- DIP("vmov%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+ DIP("vmov%d d%u, q%u, #%u\n", 8 << size, dreg, mreg,
shift_imm);
} else {
- DIP("vrshrn.i%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+ DIP("vrshrn.i%d d%u, q%u, #%u\n", 8 << size, dreg, mreg,
shift_imm);
}
return True;
default:
vassert(0);
}
- DIP("vq%sshrn.%c%u d%u, q%u, #%u\n", B ? "r" : "",
+ DIP("vq%sshrn.%c%d d%u, q%u, #%u\n", B ? "r" : "",
U ? 'u' : 's', 8 << size, dreg, mreg, shift_imm);
} else {
vassert(U);
default:
vassert(0);
}
- DIP("vq%sshrun.s%u d%u, q%u, #%u\n", B ? "r" : "",
+ DIP("vq%sshrun.s%d d%u, q%u, #%u\n", B ? "r" : "",
8 << size, dreg, mreg, shift_imm);
}
if (B) {
assign(res, binop(op, unop(cvt, getDRegI64(mreg)), mkU8(shift_imm)));
putQReg(dreg, mkexpr(res), condT);
if (shift_imm == 0) {
- DIP("vmovl.%c%u q%u, d%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vmovl.%c%d q%u, d%u\n", U ? 'u' : 's', 8 << size,
dreg, mreg);
} else {
- DIP("vshll.%c%u q%u, d%u, #%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vshll.%c%d q%u, d%u, #%u\n", U ? 'u' : 's', 8 << size,
dreg, mreg, shift_imm);
}
return True;
vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vrev64.%u %c%u, %c%u\n", 8 << size,
+ DIP("vrev64.%d %c%u, %c%u\n", 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vrev32.%u %c%u, %c%u\n", 8 << size,
+ DIP("vrev32.%d %c%u, %c%u\n", 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vrev16.%u %c%u, %c%u\n", 8 << size,
+ DIP("vrev16.%d %c%u, %c%u\n", 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
}
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vpaddl.%c%u %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vpaddl.%c%d %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
default: vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vcls.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vcls.s%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
default: vassert(0);
}
assign(res, unop(op, mkexpr(arg_m)));
- DIP("vclz.i%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vclz.i%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
}
assign(res, binop(add_op, unop(op, mkexpr(arg_m)),
mkexpr(arg_d)));
- DIP("vpadal.%c%u %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
+ DIP("vpadal.%c%d %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
mkexpr(mask)),
neg2)));
setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
- DIP("vqabs.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vqabs.s%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
assign(res, binop(op, zero, mkexpr(arg_m)));
setFlag_QC(mkexpr(res), binop(op2, zero, mkexpr(arg_m)),
Q, condT);
- DIP("vqneg.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+ DIP("vqneg.s%d %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
}
}
assign(res, binop(op, mkexpr(arg_m), zero));
- DIP("vcgt.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vcgt.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
binop(op, zero, mkexpr(arg_m))));
}
- DIP("vcge.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vcge.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
unop(op, mkexpr(arg_m))));
}
- DIP("vceq.%c%u %c%u, %c%u, #0\n", F ? 'f' : 'i', 8 << size,
+ DIP("vceq.%c%d %c%u, %c%u, #0\n", F ? 'f' : 'i', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
binop(op, mkexpr(arg_m), zero)));
}
- DIP("vcle.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vcle.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
}
assign(res, binop(op, zero, mkexpr(arg_m)));
}
- DIP("vclt.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+ DIP("vclt.%c%d %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
break;
}
assign(res, unop(Q ? Iop_Abs32Fx4 : Iop_Abs32Fx2,
mkexpr(arg_m)));
}
- DIP("vabs.%c%u %c%u, %c%u\n",
+ DIP("vabs.%c%d %c%u, %c%u\n",
F ? 'f' : 's', 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
}
assign(res, binop(op, zero, mkexpr(arg_m)));
}
- DIP("vneg.%c%u %c%u, %c%u\n",
+ DIP("vneg.%c%d %c%u, %c%u\n",
F ? 'f' : 's', 8 << size, Q ? 'q' : 'd', dreg,
Q ? 'q' : 'd', mreg);
break;
putDRegI64(dreg, mkexpr(new_d), condT);
putDRegI64(mreg, mkexpr(new_m), condT);
}
- DIP("vtrn.%u %c%u, %c%u\n",
+ DIP("vtrn.%d %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
return True;
} else if ((B >> 1) == 2) {
putDRegI64(dreg, mkexpr(new_d), condT);
putDRegI64(mreg, mkexpr(new_m), condT);
}
- DIP("vuzp.%u %c%u, %c%u\n",
+ DIP("vuzp.%d %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
return True;
} else if ((B >> 1) == 3) {
putDRegI64(dreg, mkexpr(new_d), condT);
putDRegI64(mreg, mkexpr(new_m), condT);
}
- DIP("vzip.%u %c%u, %c%u\n",
+ DIP("vzip.%d %c%u, %c%u\n",
8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
return True;
} else if (B == 8) {
default: vassert(0);
}
putDRegI64(dreg, unop(op, getQReg(mreg)), condT);
- DIP("vmovn.i%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vmovn.i%d d%u, q%u\n", 16 << size, dreg, mreg);
return True;
} else if (B == 9 || (B >> 1) == 5) {
/* VQMOVN, VQMOVUN */
case 3: return False;
default: vassert(0);
}
- DIP("vqmovun.s%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vqmovun.s%d d%u, q%u\n", 16 << size, dreg, mreg);
break;
case 2:
switch (size) {
case 3: return False;
default: vassert(0);
}
- DIP("vqmovn.s%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vqmovn.s%d d%u, q%u\n", 16 << size, dreg, mreg);
break;
case 3:
switch (size) {
case 3: return False;
default: vassert(0);
}
- DIP("vqmovn.u%u d%u, q%u\n", 16 << size, dreg, mreg);
+ DIP("vqmovn.u%d d%u, q%u\n", 16 << size, dreg, mreg);
break;
default:
vassert(0);
assign(res, binop(op, unop(cvt, getDRegI64(mreg)),
mkU8(shift_imm)));
putQReg(dreg, mkexpr(res), condT);
- DIP("vshll.i%u q%u, d%u, #%u\n", 8 << size, dreg, mreg, 8 << size);
+ DIP("vshll.i%d q%u, d%u, #%d\n", 8 << size, dreg, mreg, 8 << size);
return True;
} else if ((B >> 3) == 3 && (B & 3) == 0) {
/* VCVT (half<->single) */
mk_neon_elem_load_to_one_lane(rD, inc, i, N, size, addr);
else
mk_neon_elem_store_from_one_lane(rD, inc, i, N, size, addr);
- DIP("v%s%u.%u {", bL ? "ld" : "st", N + 1, 8 << size);
+ DIP("v%s%u.%d {", bL ? "ld" : "st", N + 1, 8 << size);
for (j = 0; j <= N; j++) {
if (j)
DIP(", ");
}
}
}
- DIP("vld%u.%u {", N + 1, 8 << size);
+ DIP("vld%u.%d {", N + 1, 8 << size);
for (r = 0; r < regs; r++) {
for (i = 0; i <= N; i++) {
if (i || r)
putIRegA(rN, e, IRTemp_INVALID, Ijk_Boring);
}
- DIP("v%s%u.%u {", bL ? "ld" : "st", N + 1, 8 << INSN(7,6));
+ DIP("v%s%u.%d {", bL ? "ld" : "st", N + 1, 8 << INSN(7,6));
if ((inc == 1 && regs * (N + 1) > 1)
|| (inc == 2 && regs > 1 && N > 0)) {
DIP("d%u-d%u", rD, rD + regs * (N + 1) - 1);
transfer last for a load and first for a store. Requires
reordering xOff/xReg. */
if (0) {
- vex_printf("\nREG_LIST_PRE: (rN=%d)\n", rN);
+ vex_printf("\nREG_LIST_PRE: (rN=%u)\n", rN);
for (i = 0; i < nX; i++)
- vex_printf("reg %d off %d\n", xReg[i], xOff[i]);
+ vex_printf("reg %u off %u\n", xReg[i], xOff[i]);
vex_printf("\n");
}
if (0) {
vex_printf("REG_LIST_POST:\n");
for (i = 0; i < nX; i++)
- vex_printf("reg %d off %d\n", xReg[i], xOff[i]);
+ vex_printf("reg %u off %u\n", xReg[i], xOff[i]);
vex_printf("\n");
}
}
default:
vassert(0);
}
- DIP("vdup.%u q%u, r%u\n", 32 / (1<<size), rD, rT);
+ DIP("vdup.%d q%u, r%u\n", 32 / (1<<size), rD, rT);
} else {
switch (size) {
case 0:
default:
vassert(0);
}
- DIP("vdup.%u d%u, r%u\n", 32 / (1<<size), rD, rT);
+ DIP("vdup.%d d%u, r%u\n", 32 / (1<<size), rD, rT);
}
goto decode_success_vfp;
}
vassert(0); // guarded by "if" above
}
putIRegA(rD, mkexpr(dstT), condT, Ijk_Boring);
- DIP("%s%s r%u, r%u, ROR #%u\n", nm, nCC(INSN_COND), rD, rM, rot);
+ DIP("%s%s r%u, r%u, ROR #%d\n", nm, nCC(INSN_COND), rD, rM, rot);
goto decode_success;
}
/* fall through */
if (sigill_diag) {
vex_printf("disInstr(arm): unhandled instruction: "
"0x%x\n", insn);
- vex_printf(" cond=%d(0x%x) 27:20=%u(0x%02x) "
+ vex_printf(" cond=%d(0x%x) 27:20=%d(0x%02x) "
"4:4=%d "
- "3:0=%u(0x%x)\n",
+ "3:0=%d(0x%x)\n",
(Int)INSN_COND, (UInt)INSN_COND,
(Int)INSN(27,20), (UInt)INSN(27,20),
(Int)INSN(4,4),
UInt bW = INSN0(5,5);
UInt imm2 = INSN1(5,4);
if (!isBadRegT(rM)) {
- DIP("pld%s [r%u, r%u, lsl %d]\n", bW ? "w" : "", rN, rM, imm2);
+ DIP("pld%s [r%u, r%u, lsl %u]\n", bW ? "w" : "", rN, rM, imm2);
goto decode_success;
}
/* fall through */
case 0x3A:
if ((regRs & 0x01) == 0) {
/* Doubleword Shift Right Logical - DSRL; MIPS64 */
- DIP("dsrl r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsrl r%u, r%u, %lld", regRd, regRt, sImmsa);
assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
putIReg(regRd, mkexpr(tmpRd));
} else if ((regRs & 0x01) == 1) {
/* Doubleword Rotate Right - DROTR; MIPS64r2 */
vassert(mode64);
- DIP("drotr r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("drotr r%u, r%u, %lld", regRd, regRt, sImmsa);
IRTemp tmpL = newTemp(ty);
IRTemp tmpR = newTemp(ty);
assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
case 0x3E:
if ((regRs & 0x01) == 0) {
/* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */
- DIP("dsrl32 r%u, r%u, %d", regRd, regRt, (Int)(sImmsa + 32));
+ DIP("dsrl32 r%u, r%u, %lld", regRd, regRt, sImmsa + 32);
assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
putIReg(regRd, mkexpr(tmpRd));
} else if ((regRs & 0x01) == 1) {
/* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */
- DIP("drotr32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("drotr32 r%u, r%u, %lld", regRd, regRt, sImmsa);
vassert(mode64);
IRTemp tmpL = newTemp(ty);
IRTemp tmpR = newTemp(ty);
break;
case 0x38: /* Doubleword Shift Left Logical - DSLL; MIPS64 */
- DIP("dsll r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsll r%u, r%u, %lld", regRd, regRt, sImmsa);
vassert(mode64);
assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa)));
putIReg(regRd, mkexpr(tmpRd));
break;
case 0x3C: /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */
- DIP("dsll32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsll32 r%u, r%u, %lld", regRd, regRt, sImmsa);
assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
putIReg(regRd, mkexpr(tmpRd));
break;
}
case 0x3B: /* Doubleword Shift Right Arithmetic - DSRA; MIPS64 */
- DIP("dsra r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsra r%u, r%u, %lld", regRd, regRt, sImmsa);
assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa)));
putIReg(regRd, mkexpr(tmpRd));
break;
case 0x3F: /* Doubleword Shift Right Arithmetic Plus 32 - DSRA32;
MIPS64 */
- DIP("dsra32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+ DIP("dsra32 r%u, r%u, %lld", regRd, regRt, sImmsa);
assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
putIReg(regRd, mkexpr(tmpRd));
break;
UInt fpc_cc = get_fpc_cc(cins);
switch (fmt) {
case 0x10: { /* C.cond.S */
- DIP("c.%s.s %d, f%d, f%d", showCondCode(cond), fpc_cc, fs, ft);
+ DIP("c.%s.s %u, f%u, f%u", showCondCode(cond), fpc_cc, fs, ft);
if (fp_mode64) {
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
case 0x11: { /* C.cond.D */
- DIP("c.%s.d %d, f%d, f%d", showCondCode(cond), fpc_cc, fs, ft);
+ DIP("c.%s.d %u, f%u, f%u", showCondCode(cond), fpc_cc, fs, ft);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
case 0x1C: {
switch(opc2) {
case 0x03: { /* DMUL rd, rs, rt */
- DIP("dmul r%d, r%d, r%d", regRd, regRs, regRt);
+ DIP("dmul r%u, r%u, r%u", regRd, regRs, regRt);
IRTemp t0 = newTemp(Ity_I128);
assign(t0, binop(Iop_MullU64, getIReg(regRs), getIReg(regRt)));
putIReg(regRd, unop(Iop_128to64, mkexpr(t0)));
/* Unsigned Byte Add - BADDU rd, rs, rt; Cavium OCTEON */
case 0x28: {
- DIP("BADDU r%d, r%d, r%d", regRs, regRt, regRd);
+ DIP("BADDU r%u, r%u, r%u", regRs, regRt, regRd);
IRTemp t0 = newTemp(Ity_I8);
assign(t0, binop(Iop_Add8,
IRTemp old = newTemp(ty);
IRTemp nyu = IRTemp_INVALID;
assign(old, getIReg(regRs));
- DIP("pop r%d, r%d", regRd, regRs);
+ DIP("pop r%u, r%u", regRd, regRs);
for (i = 0; i < 5; i++) {
mask[i] = newTemp(ty);
IRTemp mask[6];
IRTemp old = newTemp(ty);
IRTemp nyu = IRTemp_INVALID;
- DIP("dpop r%d, r%d", regRd, regRs);
+ DIP("dpop r%u, r%u", regRd, regRs);
for (i = 0; i < 6; i++) {
mask[i] = newTemp(ty);
}
case 0x32: /* 5. CINS rd, rs, p, lenm1 */
- DIP("cins r%u, r%u, %d, %d\n", regRt, regRs, p, lenM1);
+ DIP("cins r%u, r%u, %u, %u\n", regRt, regRs, p, lenM1);
assign ( tmp , binop(Iop_Shl64, mkexpr(tmpRs),
mkU8(64-( lenM1+1 ))));
assign ( tmpRt, binop(Iop_Shr64, mkexpr( tmp ),
break;
case 0x2B: /* 20. SNE rd, rs, rt */
- DIP("sne r%d, r%d, r%d", regRd,regRs, regRt);
+ DIP("sne r%u, r%u, r%u", regRd,regRs, regRt);
if (mode64)
putIReg(regRd, unop(Iop_1Uto64, binop(Iop_CmpNE64,
getIReg(regRs),
break;
case 0x2A: /* Set Equals - SEQ; Cavium OCTEON */
- DIP("seq r%d, r%d, %d", regRd, regRs, regRt);
+ DIP("seq r%u, r%u, %d", regRd, regRs, regRt);
if (mode64)
putIReg(regRd, unop(Iop_1Uto64,
binop(Iop_CmpEQ64, getIReg(regRs),
break;
case 0x2E: /* Set Equals Immediate - SEQI; Cavium OCTEON */
- DIP("seqi r%d, r%d, %d", regRt, regRs, imm);
+ DIP("seqi r%u, r%u, %u", regRt, regRs, imm);
if (mode64)
putIReg(regRt, unop(Iop_1Uto64,
binop(Iop_CmpEQ64, getIReg(regRs),
break;
case 0x2F: /* Set Not Equals Immediate - SNEI; Cavium OCTEON */
- DIP("snei r%d, r%d, %d", regRt, regRs, imm);
+ DIP("snei r%u, r%u, %u", regRt, regRs, imm);
if (mode64)
putIReg(regRt, unop(Iop_1Uto64,
binop(Iop_CmpNE64,
case 0x0A: { // lx - Load indexed instructions
switch (get_sa(theInstr)) {
case 0x00: { // LWX rd, index(base)
- DIP("lwx r%d, r%d(r%d)", regRd, regRt, regRs);
+ DIP("lwx r%u, r%u(r%u)", regRd, regRt, regRs);
LOADX_STORE_PATTERN;
putIReg(regRd, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)),
True));
break;
}
case 0x04: // LHX rd, index(base)
- DIP("lhx r%d, r%d(r%d)", regRd, regRt, regRs);
+ DIP("lhx r%u, r%u(r%u)", regRd, regRt, regRs);
LOADX_STORE_PATTERN;
if (mode64)
putIReg(regRd, unop(Iop_16Sto64, load(Ity_I16,
mkexpr(t1))));
break;
case 0x08: { // LDX rd, index(base)
- DIP("ldx r%d, r%d(r%d)", regRd, regRt, regRs);
+ DIP("ldx r%u, r%u(r%u)", regRd, regRt, regRs);
vassert(mode64); /* Currently Implemented only for n64 */
LOADX_STORE_PATTERN;
putIReg(regRd, load(Ity_I64, mkexpr(t1)));
break;
}
case 0x06: { // LBUX rd, index(base)
- DIP("lbux r%d, r%d(r%d)", regRd, regRt, regRs);
+ DIP("lbux r%u, r%u(r%u)", regRd, regRt, regRs);
LOADX_STORE_PATTERN;
if (mode64)
putIReg(regRd, unop(Iop_8Uto64, load(Ity_I8,
break;
}
case 0x10: { // LWUX rd, index(base) (Cavium OCTEON)
- DIP("lwux r%d, r%d(r%d)", regRd, regRt, regRs);
+ DIP("lwux r%u, r%u(r%u)", regRd, regRt, regRs);
LOADX_STORE_PATTERN; /* same for both 32 and 64 modes*/
putIReg(regRd, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)),
False));
break;
}
case 0x14: { // LHUX rd, index(base) (Cavium OCTEON)
- DIP("lhux r%d, r%d(r%d)", regRd, regRt, regRs);
+ DIP("lhux r%u, r%u(r%u)", regRd, regRt, regRs);
LOADX_STORE_PATTERN;
if (mode64)
putIReg(regRd,
break;
}
case 0x16: { // LBX rd, index(base) (Cavium OCTEON)
- DIP("lbx r%d, r%d(r%d)", regRd, regRs, regRt);
+ DIP("lbx r%u, r%u(r%u)", regRd, regRs, regRt);
LOADX_STORE_PATTERN;
if (mode64)
putIReg(regRd,
case 0x00: { /* Special */
switch (function) {
case 0x10: { /* MFHI */
- DIP("mfhi ac%d r%d", ac_mfhilo, rd);
+ DIP("mfhi ac%u r%u", ac_mfhilo, rd);
putIReg(rd, unop(Iop_64HIto32, getAcc(ac_mfhilo)));
break;
}
case 0x11: { /* MTHI */
- DIP("mthi ac%d r%d", ac, rs);
+ DIP("mthi ac%u r%u", ac, rs);
t1 = newTemp(Ity_I32);
assign(t1, unop(Iop_64to32, getAcc(ac)));
putAcc(ac, binop(Iop_32HLto64, getIReg(rs), mkexpr(t1)));
}
case 0x12: { /* MFLO */
- DIP("mflo ac%d r%d", ac_mfhilo, rd);
+ DIP("mflo ac%u r%u", ac_mfhilo, rd);
putIReg(rd, unop(Iop_64to32, getAcc(ac_mfhilo)));
break;
}
case 0x13: { /* MTLO */
- DIP("mtlo ac%d r%d", ac, rs);
+ DIP("mtlo ac%u r%u", ac, rs);
t1 = newTemp(Ity_I32);
assign(t1, unop(Iop_64HIto32, getAcc(ac)));
putAcc(ac, binop(Iop_32HLto64, mkexpr(t1), getIReg(rs)));
}
case 0x18: { /* MULT */
- DIP("mult ac%d r%d, r%d", ac, rs, rt);
+ DIP("mult ac%u r%u, r%u", ac, rs, rt);
t1 = newTemp(Ity_I64);
assign(t1, binop(Iop_MullS32, mkNarrowTo32(Ity_I32, getIReg(rs)),
mkNarrowTo32(Ity_I32, getIReg(rt))));
}
case 0x19: { /* MULTU */
- DIP("multu ac%d r%d, r%d", ac, rs, rt);
+ DIP("multu ac%u r%u, r%u", ac, rs, rt);
t1 = newTemp(Ity_I64);
assign(t1, binop(Iop_MullU32, mkNarrowTo32(Ity_I32, getIReg(rs)),
mkNarrowTo32(Ity_I32,
case 0x1C: { /* Special2 */
switch (function) {
case 0x00: { /* MADD */
- DIP("madd ac%d, r%d, r%d", ac, rs, rt);
+ DIP("madd ac%u, r%u, r%u", ac, rs, rt);
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
t3 = newTemp(Ity_I64);
break;
}
case 0x01: { /* MADDU */
- DIP("maddu ac%d r%d, r%d", ac, rs, rt);
+ DIP("maddu ac%u r%u, r%u", ac, rs, rt);
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
t3 = newTemp(Ity_I64);
break;
}
case 0x04: { /* MSUB */
- DIP("msub ac%d r%d, r%d", ac, rs, rt);
+ DIP("msub ac%u r%u, r%u", ac, rs, rt);
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
t3 = newTemp(Ity_I64);
break;
}
case 0x05: { /* MSUBU */
- DIP("msubu ac%d r%d, r%d", ac, rs, rt);
+ DIP("msubu ac%u r%u, r%u", ac, rs, rt);
t1 = newTemp(Ity_I64);
t2 = newTemp(Ity_I64);
t3 = newTemp(Ity_I64);
case 0x12: { /* ABSQ_S.PH */
switch (sa) {
case 0x1: { /* ABSQ_S.QB */
- DIP("absq_s.qb r%d, r%d", rd, rt);
+ DIP("absq_s.qb r%u, r%u", rd, rt);
vassert(!mode64);
t0 = newTemp(Ity_I8);
t1 = newTemp(Ity_I1);
break;
}
case 0x2: { /* REPL.QB */
- DIP("repl.qb r%d, %d", rd, dsp_imm);
+ DIP("repl.qb r%u, %u", rd, dsp_imm);
vassert(!mode64);
putIReg(rd, mkU32((dsp_imm << 24) | (dsp_imm << 16) |
break;
}
case 0x3: { /* REPLV.QB */
- DIP("replv.qb r%d, r%d", rd, rt);
+ DIP("replv.qb r%u, r%u", rd, rt);
vassert(!mode64);
t0 = newTemp(Ity_I8);
break;
}
case 0x4: { /* PRECEQU.PH.QBL */
- DIP("precequ.ph.qbl r%d, r%d", rd, rt);
+ DIP("precequ.ph.qbl r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x5: { /* PRECEQU.PH.QBR */
- DIP("precequ.ph.qbr r%d, r%d", rd, rt);
+ DIP("precequ.ph.qbr r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x6: { /* PRECEQU.PH.QBLA */
- DIP("precequ.ph.qbla r%d, r%d", rd, rt);
+ DIP("precequ.ph.qbla r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x7: { /* PRECEQU.PH.QBRA */
- DIP("precequ.ph.qbra r%d, r%d", rd, rt);
+ DIP("precequ.ph.qbra r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x9: { /* ABSQ_S.PH */
- DIP("absq_s.ph r%d, r%d", rd, rt);
+ DIP("absq_s.ph r%u, r%u", rd, rt);
vassert(!mode64);
t0 = newTemp(Ity_I16);
t1 = newTemp(Ity_I1);
break;
}
case 0xA: { /* REPL.PH */
- DIP("repl.ph r%d, %d", rd, dsp_imm);
+ DIP("repl.ph r%u, %u", rd, dsp_imm);
vassert(!mode64);
UShort immediate = extend_s_10to16(dsp_imm);
break;
}
case 0xB: { /* REPLV.PH */
- DIP("replv.ph r%d, r%d", rd, rt);
+ DIP("replv.ph r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_16HLto32,
break;
}
case 0xC: { /* PRECEQ.W.PHL */
- DIP("preceq.w.phl r%d, r%d", rd, rt);
+ DIP("preceq.w.phl r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_And32,
getIReg(rt),
break;
}
case 0xD: { /* PRECEQ.W.PHR */
- DIP("preceq.w.phr r%d, r%d", rd, rt);
+ DIP("preceq.w.phr r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_16HLto32,
unop(Iop_32to16, getIReg(rt)),
break;
}
case 0x11: { /* ABSQ_S.W */
- DIP("absq_s.w r%d, r%d", rd, rt);
+ DIP("absq_s.w r%u, r%u", rd, rt);
vassert(!mode64);
t0 = newTemp(Ity_I1);
t1 = newTemp(Ity_I1);
break;
}
case 0x1B: { /* BITREV */
- DIP("bitrev r%d, r%d", rd, rt);
+ DIP("bitrev r%u, r%u", rd, rt);
vassert(!mode64);
/* 32bit reversal as seen on Bit Twiddling Hacks site
http://graphics.stanford.edu/~seander/bithacks.html
break;
}
case 0x1C: { /* PRECEU.PH.QBL */
- DIP("preceu.ph.qbl r%d, r%d", rd, rt);
+ DIP("preceu.ph.qbl r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x1E: { /* PRECEU.PH.QBLA */
- DIP("preceu.ph.qbla r%d, r%d", rd, rt);
+ DIP("preceu.ph.qbla r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x1D: { /* PRECEU.PH.QBR */
- DIP("preceu.ph.qbr r%d, r%d", rd, rt);
+ DIP("preceu.ph.qbr r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
break;
}
case 0x1F: { /* PRECEU.PH.QBRA */
- DIP("preceu.ph.qbra r%d, r%d", rd, rt);
+ DIP("preceu.ph.qbra r%u, r%u", rd, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_Or32,
case 0x38: { /* EXTR.W */
switch(sa) {
case 0x0: { /* EXTR.W */
- DIP("extr.w r%d, ac%d, %d", rt, ac, rs);
+ DIP("extr.w r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x1: { /* EXTRV.W */
- DIP("extrv.w r%d, ac%d, r%d", rt, ac, rs);
+ DIP("extrv.w r%u, ac%u, r%u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x2: { /* EXTP */
- DIP("extp r%d, ac%d, %d", rt, ac, rs);
+ DIP("extp r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
break;
}
case 0x3: { /* EXTPV */
- DIP("extpv r%d, ac%d, r%d", rt, ac, rs);
+ DIP("extpv r%u, ac%u, r%u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
break;
}
case 0x4: { /* EXTR_R.W */
- DIP("extr_r.w r%d, ac%d, %d", rt, ac, rs);
+ DIP("extr_r.w r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x5: { /* EXTRV_R.W */
- DIP("extrv_r.w r%d, ac%d, r%d", rt, ac, rs);
+ DIP("extrv_r.w r%u, ac%u, r%u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x6: { /* EXTR_RS.W */
- DIP("extr_rs.w r%d, ac%d, %d", rt, ac, rs);
+ DIP("extr_rs.w r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x7: { /* EXTRV_RS.W */
- DIP("extrv_rs.w r%d, ac%d, r%d", rt, ac, rs);
+ DIP("extrv_rs.w r%u, ac%u, r%u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0xA: { /* EXTPDP */
- DIP("extpdp r%d, ac%d, %d", rt, ac, rs);
+ DIP("extpdp r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
break;
}
case 0xB: { /* EXTPDPV */
- DIP("extpdpv r%d, ac%d, r%d", rt, ac, rs);
+ DIP("extpdpv r%u, ac%u, r%u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
break;
}
case 0xE: { /* EXTR_S.H */
- DIP("extr_s.h r%d, ac%d, %d", rt, ac, rs);
+ DIP("extr_s.h r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0xF: { /* EXTRV_S.H */
- DIP("extrv_s.h r%d, ac%d, %d", rt, ac, rs);
+ DIP("extrv_s.h r%u, ac%u, %u", rt, ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x12: { /* RDDSP*/
- DIP("rddsp r%d, mask 0x%x", rd, rddsp_mask);
+ DIP("rddsp r%u, mask 0x%x", rd, rddsp_mask);
vassert(!mode64);
putIReg(rd, mkU32(0x0));
break;
}
case 0x13: { /* WRDSP */
- DIP("wrdsp r%d, mask 0x%x", rs, wrdsp_mask);
+ DIP("wrdsp r%u, mask 0x%x", rs, wrdsp_mask);
vassert(!mode64);
if ((wrdsp_mask & 0x3f) == 0x3f) {
break;
}
case 0x1A: { /* SHILO */
- DIP("shilo ac%d, %d", ac, shift);
+ DIP("shilo ac%u, %u", ac, shift);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x1B: { /* SHILOV */
- DIP("shilov ac%d, r%d", ac, rs);
+ DIP("shilov ac%u, r%u", ac, rs);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
break;
}
case 0x1F: { /* MTHLIP */
- DIP("mthlip r%d, ac%d", rs, ac);
+ DIP("mthlip r%u, ac%u", rs, ac);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0xA: { /* LX */
switch(sa) {
case 0x0: { /* LWX */
- DIP("lwx r%d, r%d(r%d)", rd, rt, rs);
+ DIP("lwx r%u, r%u(r%u)", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x4: { /* LHX */
- DIP("lhx r%d, r%d(r%d)", rd, rt, rs);
+ DIP("lhx r%u, r%u(r%u)", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x6: { /* LBUX */
- DIP("lbux r%d, r%d(r%d)", rd, rt, rs);
+ DIP("lbux r%u, r%u(r%u)", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
case 0xC: { /* INSV */
switch(sa) {
case 0x0: { /* INSV */
- DIP("insv r%d, r%d", rt, rs);
+ DIP("insv r%u, r%u", rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
case 0x10: { /* ADDU.QB */
switch(sa) {
case 0x00: { /* ADDU.QB */
- DIP("addu.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("addu.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x1: { /* SUBU.QB */
- DIP("subu.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("subu.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x04: { /* ADDU_S.QB */
- DIP("addu_s.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("addu_s.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x05: { /* SUBU_S.QB */
- DIP("subu_s.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("subu_s.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I1);
break;
}
case 0x6: { /* MULEU_S.PH.QBL */
- DIP("muleu_s.ph.qbl r%d, r%d, r%d", rd, rs, rt);
+ DIP("muleu_s.ph.qbl r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x7: { /* MULEU_S.PH.QBR */
- DIP("muleu_s.ph.qbr r%d, r%d, r%d", rd, rs, rt);
+ DIP("muleu_s.ph.qbr r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x08: { /* ADDU.PH */
- DIP("addu.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("addu.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x9: { /* SUBU.PH */
- DIP("subu.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("subu.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xA: { /* ADDQ.PH */
- DIP("addq.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("addq.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xB: { /* SUBQ.PH */
- DIP("subq.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("subq.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xC: { /* ADDU_S.PH */
- DIP("addu_s.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("addu_s.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xD: { /* SUBU_S.PH */
- DIP("subu_s.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("subu_s.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xE: { /* ADDQ_S.PH */
- DIP("addq_s.ph r%d r%d, r%d", rd, rs, rt);
+ DIP("addq_s.ph r%u r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xF: { /* SUBQ_S.PH */
- DIP("subq_s.ph r%d r%d, r%d", rd, rs, rt);
+ DIP("subq_s.ph r%u r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x10: { /* ADDSC */
- DIP("addsc r%d, r%d, r%d", rd, rs, rt);
+ DIP("addsc r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I1);
break;
}
case 0x11: { /* ADDWC */
- DIP("addwc r%d, r%d, r%d", rd, rs, rt);
+ DIP("addwc r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I64);
break;
}
case 0x12: { /* MODSUB */
- DIP("modsub r%d, r%d, r%d", rd, rs, rt);
+ DIP("modsub r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x14: { /* RADDU.W.QB */
- DIP("raddu.w.qb r%d, r%d", rd, rs);
+ DIP("raddu.w.qb r%u, r%u", rd, rs);
vassert(!mode64);
putIReg(rd, binop(Iop_Add32,
binop(Iop_Add32,
break;
}
case 0x16: { /* ADDQ_S.W */
- DIP("addq_s.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("addq_s.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I1);
break;
}
case 0x17: { /* SUBQ_S.W */
- DIP("subq_s.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("subq_s.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I1);
break;
}
case 0x1C: { /* MULEQ_S.W.PHL */
- DIP("muleq_s.w.phl r%d, r%d, r%d", rd, rs, rt);
+ DIP("muleq_s.w.phl r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x1D: { /* MULEQ_S.W.PHR */
- DIP("muleq_s.w.phr r%d, r%d, r%d", rd, rs, rt);
+ DIP("muleq_s.w.phr r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x1E: { /* MULQ_S.PH */
- DIP("mulq_s.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("mulq_s.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x1F: { /* MULQ_RS.PH */
- DIP("mulq_rs.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("mulq_rs.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
case 0x11: { /* CMPU.EQ.QB */
switch(sa) {
case 0x0: { /* CMPU.EQ.QB */
- DIP("cmpu.eq.qb r%d, r%d", rs, rt);
+ DIP("cmpu.eq.qb r%u, r%u", rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x1: { /* CMPU.LT.QB */
- DIP("cmpu.lt.qb r%d, r%d", rs, rt);
+ DIP("cmpu.lt.qb r%u, r%u", rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x2: { /* CMPU.LE.QB */
- DIP("cmpu.le.qb r%d, r%d", rs, rt);
+ DIP("cmpu.le.qb r%u, r%u", rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x3: { /* PICK.QB */
- DIP("pick.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("pick.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I8);
break;
}
case 0x4: { /* CMPGU.EQ.QB */
- DIP("cmpgu.eq.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("cmpgu.eq.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x5: { /* CMPGU.LT.QB */
- DIP("cmpgu.lt.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("cmpgu.lt.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x6: { /* CMPGU.LE.QB */
- DIP("cmpgu.le.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("cmpgu.le.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x8: { /* CMP.EQ.PH */
- DIP("cmp.eq.ph r%d, r%d", rs, rt);
+ DIP("cmp.eq.ph r%u, r%u", rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x9: { /* CMP.LT.PH */
- DIP("cmp.lt.ph r%d, r%d", rs, rt);
+ DIP("cmp.lt.ph r%u, r%u", rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0xA: { /* CMP.LE.PH */
- DIP("cmp.le.ph r%d, r%d", rs, rt);
+ DIP("cmp.le.ph r%u, r%u", rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0xB: { /* PICK.PH */
- DIP("pick.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("pick.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I16);
break;
}
case 0xC: { /* PRECRQ.QB.PH */
- DIP("precrq.qb.ph r%d, r%d, %d", rd, rs, rt);
+ DIP("precrq.qb.ph r%u, r%u, %u", rd, rs, rt);
vassert(!mode64);
putIReg(rd,
binop(Iop_16HLto32,
break;
}
case 0xD: { /* PRECR.QB.PH */
- DIP("precr.qb.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("precr.qb.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
putIReg(rd,
break;
}
case 0xF: { /* PRECRQU_S.QB.PH */
- DIP("precrqu_s.qb.ph r%d, r%d, %d", rd, rs, rt);
+ DIP("precrqu_s.qb.ph r%u, r%u, %u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I8);
t1 = newTemp(Ity_I8);
break;
}
case 0x14: { /* PRECRQ.PH.W */
- DIP("precrq.ph.w r%d, r%d, %d", rd, rs, rt);
+ DIP("precrq.ph.w r%u, r%u, %u", rd, rs, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_16HLto32,
unop(Iop_32HIto16, getIReg(rs)),
break;
}
case 0x15: { /* PRECRQ_RS.PH.W */
- DIP("precrq_rs.ph.w r%d, r%d, %d", rd, rs, rt);
+ DIP("precrq_rs.ph.w r%u, r%u, %u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I1);
break;
}
case 0x1E: { /* PRECR_SRA.PH.W */
- DIP("precr_sra.ph.w r%d, r%d, %d", rt, rs, rd);
+ DIP("precr_sra.ph.w r%u, r%u, %u", rt, rs, rd);
vassert(!mode64);
if (0 == rd) {
break;
}
case 0x1F: { /* PRECR_SRA_R.PH.W */
- DIP("precr_sra_r.ph.w r%d, r%d, %d", rt, rs, rd);
+ DIP("precr_sra_r.ph.w r%u, r%u, %u", rt, rs, rd);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0xE: { /* PACKRL.PH */
- DIP("packrl.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("packrl.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_16HLto32,
break;
}
case 0x18: { /* CMPGDU.EQ.QB */
- DIP("cmpgdu.eq.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("cmpgdu.eq.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x19: { /* CMPGDU.LT.QB */
- DIP("cmpgdu.lt.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("cmpgdu.lt.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
break;
}
case 0x1A: { /* CMPGDU.LE.QB */
- DIP("cmpgdu.le.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("cmpgdu.le.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I1);
case 0x13: { /* SHLL.QB */
switch(sa) {
case 0x0: { /* SHLL.QB */
- DIP("shll.qb r%d, r%d, %d", rd, rt, rs);
+ DIP("shll.qb r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x3: { /* SHRL.QB */
- DIP("shrl.qb r%d, r%d, %d", rd, rt, rs);
+ DIP("shrl.qb r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I8);
break;
}
case 0x2: { /* SHLLV.QB */
- DIP("shllv.qb r%d, r%d, r%d", rd, rt, rs);
+ DIP("shllv.qb r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x1: { /* SHRLV.QB */
- DIP("shrlv.qb r%d, r%d, r%d", rd, rt, rs);
+ DIP("shrlv.qb r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I8);
t1 = newTemp(Ity_I8);
break;
}
case 0x4: { /* SHRA.QB */
- DIP("shra.qb r%d, r%d, %d", rd, rt, rs);
+ DIP("shra.qb r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x5: { /* SHRA_R.QB */
- DIP("shra_r.qb r%d, r%d, %d", rd, rt, rs);
+ DIP("shra_r.qb r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I8);
break;
}
case 0x6: { /* SHRAV.QB */
- DIP("shrav.qb r%d, r%d, %d", rd, rt, rs);
+ DIP("shrav.qb r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x7: { /* SHRAV_R.QB */
- DIP("shrav_r.qb r%d, r%d, r%d", rd, rt, rs);
+ DIP("shrav_r.qb r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I8);
break;
}
case 0x8: { /* SHLL.PH */
- DIP("shll.ph r%d, r%d, %d", rd, rt, rs);
+ DIP("shll.ph r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x9: { /* SHRA.PH */
- DIP("shra.ph r%d, r%d, %d", rd, rt, rs);
+ DIP("shra.ph r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0xA: { /* SHLLV.PH */
- DIP("shllv.ph r%d, r%d, r%d", rd, rt, rs);
+ DIP("shllv.ph r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
break;
}
case 0xB: { /* SHRAV.PH */
- DIP("shrav.ph r%d, r%d, r%d", rd, rt, rs);
+ DIP("shrav.ph r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0xC: { /* SHLL_S.PH */
- DIP("shll_s.ph r%d, r%d, %d", rd, rt, rs);
+ DIP("shll_s.ph r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0xD: { /* SHRA_R.PH */
- DIP("shra.ph r%d, r%d, %d", rd, rt, rs);
+ DIP("shra.ph r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0xE: { /* SHLLV_S.PH */
- DIP("shllv_s.ph r%d, r%d, r%d", rd, rt, rs);
+ DIP("shllv_s.ph r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
break;
}
case 0xF: { /* SHRAV_R.PH */
- DIP("shrav_r.ph r%d, r%d, r%d", rd, rt, rs);
+ DIP("shrav_r.ph r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x14: { /* SHLL_S.W */
- DIP("shll_s.w r%d, r%d, %d", rd, rt, rs);
+ DIP("shll_s.w r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x15: { /* SHRA_R.W */
- DIP("shra_r.w r%d, r%d, %d", rd, rt, rs);
+ DIP("shra_r.w r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
if (0 == rs) {
putIReg(rd, getIReg(rt));
break;
}
case 0x16: { /* SHLLV_S.W */
- DIP("shllv_s.w r%d, r%d, r%d", rd, rt, rs);
+ DIP("shllv_s.w r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x17: { /* SHRAV_R.W */
- DIP("shrav_r.w r%d, r%d, r%d", rd, rt, rs);
+ DIP("shrav_r.w r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
break;
}
case 0x19: { /* SHRL.PH */
- DIP("shrl.ph r%d, r%d, %d", rd, rt, rs);
+ DIP("shrl.ph r%u, r%u, %u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x1B: { /* SHRLV.PH */
- DIP("shrlv.ph r%d, r%d, r%d", rd, rt, rs);
+ DIP("shrlv.ph r%u, r%u, r%u", rd, rt, rs);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I1);
case 0x18: { /* ADDUH.QB/MUL.PH */
switch(sa) {
case 0x00: { /* ADDUH.QB */
- DIP("adduh.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("adduh.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x1: { /* SUBUH.QB */
- DIP("subuh.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("subuh.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x02: { /* ADDUH_R.QB */
- DIP("adduh_r.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("adduh_r.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x3: { /* SUBUH_R.QB */
- DIP("subuh_r.qb r%d, r%d, r%d", rd, rs, rt);
+ DIP("subuh_r.qb r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
break;
}
case 0x8: { /* ADDQH.PH */
- DIP("addqh.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("addqh.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I16);
break;
}
case 0x9: { /* SUBQH.PH */
- DIP("subqh.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("subqh.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
putIReg(rd, binop(Iop_HSub16Sx2,
break;
}
case 0xA: {/* ADDQH_R.PH */
- DIP("addqh_r.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("addqh_r.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I16);
break;
}
case 0xB: { /* SUBQH_R.PH */
- DIP("subqh_r.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("subqh_r.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I16);
break;
}
case 0xC: { /* MUL.PH */
- DIP("mul.ph r%d, r%d, r%d", rd, rs, rt);
+ DIP("mul.ph r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0xE: { /* MUL_S.PH */
- DIP("mul_s.ph r%d r%d, r%d", rd, rs, rt);
+ DIP("mul_s.ph r%u r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x10: { /* ADDQH.W */
- DIP("addqh.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("addqh.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x11: { /* SUBQH.W */
- DIP("subqh.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("subqh.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x12: { /* ADDQH_R.W */
- DIP("addqh_r.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("addqh_r.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x13: { /* SUBQH_R.W */
- DIP("subqh_r.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("subqh_r.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x16: { /* MULQ_S.W */
- DIP("mulq_s.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("mulq_s.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I1);
break;
}
case 0x17: { /* MULQ_RS.W */
- DIP("mulq_rs.w r%d, r%d, r%d", rd, rs, rt);
+ DIP("mulq_rs.w r%u, r%u, r%u", rd, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I1);
case 0x30: { /* DPAQ.W.PH */
switch(sa) {
case 0x0: { /* DPA.W.PH */
- DIP("dpa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpa.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
break;
}
case 0x1: { /* DPS.W.PH */
- DIP("dps.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dps.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
break;
}
case 0x2: { /* MULSA.W.PH */
- DIP("mulsa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("mulsa.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x3: { /* DPAU.H.QBL */
- DIP("dpau.h.qbl ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpau.h.qbl ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x4: { /* DPAQ_S.W.PH */
- DIP("dpaq_s.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpaq_s.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x5: { /* DPSQ_S.W.PH */
- DIP("dpsq_s.w.ph ac%d r%d, r%d", ac, rs, rt);
+ DIP("dpsq_s.w.ph ac%u r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x6: { /* MULSAQ_S.W.PH */
- DIP("mulsaq_s.w.ph ac%d r%d, r%d", ac, rs, rt);
+ DIP("mulsaq_s.w.ph ac%u r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x7: { /* DPAU.H.QBR */
- DIP("dpau.h.qbr ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpau.h.qbr ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x8: { /* DPAX.W.PH */
- DIP("dpax.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpax.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x9: { /* DPSX.W.PH */
- DIP("dpsx.w.ph ac%d r%d, r%d", ac, rs, rt);
+ DIP("dpsx.w.ph ac%u r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
break;
}
case 0xB: { /* DPSU.H.QBL */
- DIP("dpsu.h.qbl ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpsu.h.qbl ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0xC: { /* DPAQ_SA.L.W */
- DIP("dpaq_sa.l.w ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpaq_sa.l.w ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0xD: { /* DPSQ_SA.L.W */
- DIP("dpsq_sa.l.w ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpsq_sa.l.w ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0xF: { /* DPSU.H.QBR */
- DIP("dpsu.h.qbr ac%d r%d, r%d", ac, rs, rt);
+ DIP("dpsu.h.qbr ac%u r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
break;
}
case 0x10: { /* MAQ_SA.W.PHL */
- DIP("maq_sa.w.phl ac%d, r%d, r%d", ac, rs, rt);
+ DIP("maq_sa.w.phl ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x12: { /* MAQ_SA.W.PHR */
- DIP("maq_sa.w.phr ac%d, r%d, r%d", ac, rs, rt);
+ DIP("maq_sa.w.phr ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x14: { /* MAQ_S.W.PHL */
- DIP("maq_s.w.phl ac%d, r%d, r%d", ac, rs, rt);
+ DIP("maq_s.w.phl ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x16: { /* MAQ_S.W.PHR */
- DIP("maq_s.w.phr ac%d, r%d, r%d", ac, rs, rt);
+ DIP("maq_s.w.phr ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
break;
}
case 0x18: { /* DPAQX_S.W.PH */
- DIP("dpaqx_s.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpaqx_s.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x19: { /* DPSQX_S.W.PH */
- DIP("dpsqx_s.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpsqx_s.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x1A: { /* DPAQX_SA.W.PH */
- DIP("dpaqx_sa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpaqx_sa.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
break;
}
case 0x1B: { /* DPSQX_SA.W.PH */
- DIP("dpsqx_sa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+ DIP("dpsqx_sa.w.ph ac%u, r%u, r%u", ac, rs, rt);
vassert(!mode64);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
case 0x31: { /* APPEND */
switch(sa) {
case 0x0: { /* APPEND */
- DIP("append r%d, r%d, %d", rt, rs, rd);
+ DIP("append r%u, r%u, %u", rt, rs, rd);
vassert(!mode64);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
break;
}
case 0x1: { /* PREPEND */
- DIP("prepend r%d, r%d, %d", rt, rs, rd);
+ DIP("prepend r%u, r%u, %u", rt, rs, rd);
vassert(!mode64);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
break;
}
case 0x10: { /* BALIGN */
- DIP("balign r%d, r%d, %d", rt, rs, rd);
+ DIP("balign r%u, r%u, %u", rt, rs, rd);
vassert(!mode64);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
const UChar *code = guest_code + delta;
cins = getUInt(code);
- DIP("\t0x%lx:\t0x%08x\t", (long)guest_PC_curr_instr, cins);
+ DIP("\t0x%llx:\t0x%08x\t", (Addr64)guest_PC_curr_instr, cins);
if (delta != 0) {
if (branch_or_jump(guest_code + delta - 4)) {
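/* Why the cast above changes rather than just the format:
   guest_PC_curr_instr is an Addr64 (a ULong in VEX), so (long)
   truncates it on 32-bit hosts as well as flipping the signedness.
   Widening to the unsigned 64-bit type and printing with %llx is
   safe on both; a minimal sketch:

      Addr64 pc = guest_PC_curr_instr;
      vex_printf("\t0x%llx:\t0x%08x\t", pc, cins);
*/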
case 0x11: { /* COP1 */
if (fmt == 0x3 && fd == 0 && function == 0) { /* MFHC1 */
- DIP("mfhc1 r%d, f%d", rt, fs);
+ DIP("mfhc1 r%u, f%u", rt, fs);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
}
break;
} else if (fmt == 0x7 && fd == 0 && function == 0) { /* MTHC1 */
- DIP("mthc1 r%d, f%d", rt, fs);
+ DIP("mthc1 r%u, f%u", rt, fs);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
assign(t0, binop(Iop_32HLto64, getIReg(rt),
if (tf == 1 && nd == 0) {
/* branch on true */
- DIP("bc1t %d, %d", bc1_cc, imm);
+ DIP("bc1t %u, %u", bc1_cc, imm);
assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
dis_branch(False, mkexpr(t3), imm, &bstmt);
break;
} else if (tf == 0 && nd == 0) {
/* branch on false */
- DIP("bc1f %d, %d", bc1_cc, imm);
+ DIP("bc1f %u, %u", bc1_cc, imm);
assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
dis_branch(False, mkexpr(t3), imm, &bstmt);
break;
} else if (nd == 1 && tf == 0) {
- DIP("bc1fl %d, %d", bc1_cc, imm);
+ DIP("bc1fl %u, %u", bc1_cc, imm);
lastn = dis_branch_likely(binop(Iop_CmpNE32, mkexpr(t2),
mkU32(0x0)), imm);
break;
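/* Note the inversion: dis_branch_likely is handed the condition under
   which the branch is NOT taken, since branch-likely forms annul the
   delay slot on the fall-through path.  So bc1fl (branch if false)
   tests CmpNE32 here, and bc1tl below tests CmpEQ32 -- each the
   complement of the taken condition that bc1f/bc1t pass to
   dis_branch above. */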
} else if (nd == 1 && tf == 1) {
- DIP("bc1tl %d, %d", bc1_cc, imm);
+ DIP("bc1tl %u, %u", bc1_cc, imm);
lastn = dis_branch_likely(binop(Iop_CmpEQ32, mkexpr(t2),
mkU32(0x0)), imm);
break;
case 0x5: /* abs.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("abs.s f%d, f%d", fd, fs);
+ DIP("abs.s f%u, f%u", fd, fs);
putFReg(fd, mkWidenFromF32(tyF, unop(Iop_AbsF32,
getLoFromF64(tyF, getFReg(fs)))));
break;
case 0x11: /* D */
- DIP("abs.d f%d, f%d", fd, fs);
+ DIP("abs.d f%u, f%u", fd, fs);
putDReg(fd, unop(Iop_AbsF64, getDReg(fs)));
break;
default:
case 0x02: /* MUL.fmt */
switch (fmt) {
case 0x11: { /* D */
- DIP("mul.d f%d, f%d, f%d", fd, fs, ft);
+ DIP("mul.d f%u, f%u, f%u", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_MulF64, rm, getDReg(fs),
getDReg(ft)));
break;
}
case 0x10: { /* S */
- DIP("mul.s f%d, f%d, f%d", fd, fs, ft);
+ DIP("mul.s f%u, f%u, f%u", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_MulF32, rm,
getLoFromF64(tyF, getFReg(fs)),
case 0x03: /* DIV.fmt */
switch (fmt) {
case 0x11: { /* D */
- DIP("div.d f%d, f%d, f%d", fd, fs, ft);
+ DIP("div.d f%u, f%u, f%u", fd, fs, ft);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_DivF64, rm, getDReg(fs),
getDReg(ft)));
break;
}
case 0x10: { /* S */
- DIP("div.s f%d, f%d, f%d", fd, fs, ft);
+ DIP("div.s f%u, f%u, f%u", fd, fs, ft);
calculateFCSR(fs, ft, DIVS, False, 2);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm,
case 0x01: /* SUB.fmt */
switch (fmt) {
case 0x11: { /* D */
- DIP("sub.d f%d, f%d, f%d", fd, fs, ft);
+ DIP("sub.d f%u, f%u, f%u", fd, fs, ft);
calculateFCSR(fs, ft, SUBD, False, 2);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs),
break;
}
case 0x10: { /* S */
- DIP("sub.s f%d, f%d, f%d", fd, fs, ft);
+ DIP("sub.s f%u, f%u, f%u", fd, fs, ft);
calculateFCSR(fs, ft, SUBS, True, 2);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_SubF32, rm,
case 0x06: /* MOV.fmt */
switch (fmt) {
case 0x11: /* D */
- DIP("mov.d f%d, f%d", fd, fs);
+ DIP("mov.d f%u, f%u", fd, fs);
if (fp_mode64) {
putDReg(fd, getDReg(fs));
} else {
}
break;
case 0x10: /* S */
- DIP("mov.s f%d, f%d", fd, fs);
+ DIP("mov.s f%u, f%u", fd, fs);
putFReg(fd, getFReg(fs));
break;
default:
case 0x7: /* neg.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("neg.s f%d, f%d", fd, fs);
+ DIP("neg.s f%u, f%u", fd, fs);
putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32,
getLoFromF64(tyF, getFReg(fs)))));
break;
case 0x11: /* D */
- DIP("neg.d f%d, f%d", fd, fs);
+ DIP("neg.d f%u, f%u", fd, fs);
putDReg(fd, unop(Iop_NegF64, getDReg(fs)));
break;
default:
case 0x08: /* ROUND.L.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("round.l.s f%d, f%d", fd, fs);
+ DIP("round.l.s f%u, f%u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, ROUNDLS, True, 1);
t0 = newTemp(Ity_I64);
}
break;
case 0x11: /* D */
- DIP("round.l.d f%d, f%d", fd, fs);
+ DIP("round.l.d f%u, f%u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, ROUNDLD, False, 1);
putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0),
case 0x09: /* TRUNC.L.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("trunc.l.s f%d, f%d", fd, fs);
+ DIP("trunc.l.s f%u, f%u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, TRUNCLS, True, 1);
t0 = newTemp(Ity_I64);
}
break;
case 0x11: /* D */
- DIP("trunc.l.d f%d, f%d", fd, fs);
+ DIP("trunc.l.d f%u, f%u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, TRUNCLD, False, 1);
putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3),
case 0x15: /* RECIP.fmt */
switch (fmt) {
case 0x10: { /* S */
- DIP("recip.s f%d, f%d", fd, fs);
+ DIP("recip.s f%u, f%u", fd, fs);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32,
rm, unop(Iop_ReinterpI32asF32,
break;
}
case 0x11: { /* D */
- DIP("recip.d f%d, f%d", fd, fs);
+ DIP("recip.d f%u, f%u", fd, fs);
IRExpr *rm = get_IR_roundingmode();
/* putDReg(fd, 1.0/getDreg(fs)); */
putDReg(fd, triop(Iop_DivF64, rm,
case 0x13: /* MOVN.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("movn.s f%d, f%d, r%d", fd, fs, rt);
+ DIP("movn.s f%u, f%u, r%u", fd, fs, rt);
t1 = newTemp(Ity_F64);
t2 = newTemp(Ity_F64);
t3 = newTemp(Ity_I1);
mkexpr(t4)));
break;
case 0x11: /* D */
- DIP("movn.d f%d, f%d, r%d", fd, fs, rt);
+ DIP("movn.d f%u, f%u, r%u", fd, fs, rt);
t3 = newTemp(Ity_I1);
t4 = newTemp(Ity_F64);
case 0x12: /* MOVZ.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("movz.s f%d, f%d, r%d", fd, fs, rt);
+ DIP("movz.s f%u, f%u, r%u", fd, fs, rt);
t1 = newTemp(Ity_F64);
t2 = newTemp(Ity_F64);
break;
case 0x11: /* D */
- DIP("movz.d f%d, f%d, r%d", fd, fs, rt);
+ DIP("movz.d f%u, f%u, r%u", fd, fs, rt);
t3 = newTemp(Ity_I1);
t4 = newTemp(Ity_F64);
if (mode64)
UInt mov_cc = get_mov_cc(cins);
switch (fmt) { /* MOVCF = 010001 */
case 0x11: /* D */
- DIP("movt.d f%d, f%d, %d", fd, fs, mov_cc);
+ DIP("movt.d f%u, f%u, %u", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
putDReg(fd, mkexpr(t4));
break;
case 0x10: /* S */
- DIP("movt.s f%d, f%d, %d", fd, fs, mov_cc);
+ DIP("movt.s f%u, f%u, %u", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
switch (fmt) /* MOVCF = 010001 */
{
case 0x11: /* D */
- DIP("movf.d f%d, f%d, %d", fd, fs, mov_cc);
+ DIP("movf.d f%u, f%u, %u", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
putDReg(fd, mkexpr(t4));
break;
case 0x10: /* S */
- DIP("movf.s f%d, f%d, %d", fd, fs, mov_cc);
+ DIP("movf.s f%u, f%u, %u", fd, fs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
case 0x0: /* add.fmt */
switch (fmt) {
case 0x10: { /* S */
- DIP("add.s f%d, f%d, f%d", fd, fs, ft);
+ DIP("add.s f%u, f%u, f%u", fd, fs, ft);
calculateFCSR(fs, ft, ADDS, True, 2);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_AddF32, rm,
break;
}
case 0x11: { /* D */
- DIP("add.d f%d, f%d, f%d", fd, fs, ft);
+ DIP("add.d f%u, f%u, f%u", fd, fs, ft);
calculateFCSR(fs, ft, ADDD, False, 2);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs), getDReg(ft)));
}
case 0x4: /* MTC1 (Move Word to Floating Point) */
- DIP("mtc1 r%d, f%d", rt, fs);
+ DIP("mtc1 r%u, f%u", rt, fs);
if (fp_mode64) {
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_F32);
break;
case 0x5: /* Doubleword Move to Floating Point DMTC1; MIPS64 */
- DIP("dmtc1 r%d, f%d", rt, fs);
+ DIP("dmtc1 r%u, f%u", rt, fs);
vassert(mode64);
putFReg(fs, unop(Iop_ReinterpI64asF64, getIReg(rt)));
break;
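/* DMTC1/DMFC1 are raw bit-pattern moves between the integer and FP
   register files, hence the Iop_ReinterpI64asF64 here and the
   matching Iop_ReinterpF64asI64 in the DMFC1 case below, rather than
   any numeric int-to-float conversion. */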
case 0x0: /* MFC1 */
- DIP("mfc1 r%d, f%d", rt, fs);
+ DIP("mfc1 r%u, f%u", rt, fs);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
case 0x1: /* Doubleword Move from Floating Point DMFC1;
MIPS64 */
- DIP("dmfc1 r%d, f%d", rt, fs);
+ DIP("dmfc1 r%u, f%u", rt, fs);
putIReg(rt, unop(Iop_ReinterpF64asI64, getFReg(fs)));
break;
case 0x6: /* CTC1 */
- DIP("ctc1 r%d, f%d", rt, fs);
+ DIP("ctc1 r%u, f%u", rt, fs);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
}
break;
case 0x2: /* CFC1 */
- DIP("cfc1 r%d, f%d", rt, fs);
+ DIP("cfc1 r%u, f%u", rt, fs);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
case 0x21: /* CVT.D */
switch (fmt) {
case 0x10: /* S */
- DIP("cvt.d.s f%d, f%d", fd, fs);
+ DIP("cvt.d.s f%u, f%u", fd, fs);
calculateFCSR(fs, 0, CVTDS, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
break;
case 0x14:
- DIP("cvt.d.w %d, %d", fd, fs);
+ DIP("cvt.d.w %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTDW, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
case 0x15: { /* L */
if (fp_mode64) {
- DIP("cvt.d.l %d, %d", fd, fs);
+ DIP("cvt.d.l %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTDL, False, 1);
t0 = newTemp(Ity_I64);
assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
case 0x20: /* cvt.s */
switch (fmt) {
case 0x14: /* W */
- DIP("cvt.s.w %d, %d", fd, fs);
+ DIP("cvt.s.w %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTSW, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
break;
case 0x11: /* D */
- DIP("cvt.s.d %d, %d", fd, fs);
+ DIP("cvt.s.d %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTSD, False, 1);
t0 = newTemp(Ity_F32);
assign(t0, binop(Iop_F64toF32, get_IR_roundingmode(),
break;
case 0x15: /* L */
- DIP("cvt.s.l %d, %d", fd, fs);
+ DIP("cvt.s.l %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTSL, False, 1);
t0 = newTemp(Ity_I64);
assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
case 0x24: /* cvt.w */
switch (fmt) {
case 0x10: /* S */
- DIP("cvt.w.s %d, %d", fd, fs);
+ DIP("cvt.w.s %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTWS, True, 1);
putFReg(fd,
mkWidenFromF32(tyF,
break;
case 0x11:
- DIP("cvt.w.d %d, %d", fd, fs);
+ DIP("cvt.w.d %u, %u", fd, fs);
calculateFCSR(fs, 0, CVTWD, False, 1);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_F32);
case 0x25: /* cvt.l */
switch (fmt) {
case 0x10: /* S */
- DIP("cvt.l.s %d, %d", fd, fs);
+ DIP("cvt.l.s %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, CVTLS, True, 1);
t0 = newTemp(Ity_I64);
break;
case 0x11: { /* D */
- DIP("cvt.l.d %d, %d", fd, fs);
+ DIP("cvt.l.d %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, CVTLD, False, 1);
putDReg(fd, binop(Iop_RoundF64toInt,
case 0x0B: /* FLOOR.L.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("floor.l.s %d, %d", fd, fs);
+ DIP("floor.l.s %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, FLOORLS, True, 1);
t0 = newTemp(Ity_I64);
break;
case 0x11: /* D */
- DIP("floor.l.d %d, %d", fd, fs);
+ DIP("floor.l.d %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, FLOORLD, False, 1);
putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1),
case 0x0C: /* ROUND.W.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("round.w.s f%d, f%d", fd, fs);
+ DIP("round.w.s f%u, f%u", fd, fs);
calculateFCSR(fs, 0, ROUNDWS, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
break;
case 0x11: /* D */
- DIP("round.w.d f%d, f%d", fd, fs);
+ DIP("round.w.d f%u, f%u", fd, fs);
calculateFCSR(fs, 0, ROUNDWD, False, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I32);
case 0x0F: /* FLOOR.W.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("floor.w.s f%d, f%d", fd, fs);
+ DIP("floor.w.s f%u, f%u", fd, fs);
calculateFCSR(fs, 0, FLOORWS, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
break;
case 0x11: /* D */
- DIP("floor.w.d f%d, f%d", fd, fs);
+ DIP("floor.w.d f%u, f%u", fd, fs);
calculateFCSR(fs, 0, FLOORWD, False, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I32);
case 0x0D: /* TRUNC.W */
switch (fmt) {
case 0x10: /* S */
- DIP("trunc.w.s %d, %d", fd, fs);
+ DIP("trunc.w.s %u, %u", fd, fs);
calculateFCSR(fs, 0, TRUNCWS, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
getFReg(fs)));
break;
case 0x11: /* D */
- DIP("trunc.w.d %d, %d", fd, fs);
+ DIP("trunc.w.d %u, %u", fd, fs);
calculateFCSR(fs, 0, TRUNCWD, False, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I32);
case 0x0E: /* CEIL.W.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("ceil.w.s %d, %d", fd, fs);
+ DIP("ceil.w.s %u, %u", fd, fs);
calculateFCSR(fs, 0, CEILWS, True, 1);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
break;
case 0x11: /* D */
- DIP("ceil.w.d %d, %d", fd, fs);
+ DIP("ceil.w.d %u, %u", fd, fs);
calculateFCSR(fs, 0, CEILWD, False, 1);
if (!fp_mode64) {
t0 = newTemp(Ity_I32);
case 0x0A: /* CEIL.L.fmt */
switch (fmt) {
case 0x10: /* S */
- DIP("ceil.l.s %d, %d", fd, fs);
+ DIP("ceil.l.s %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, CEILLS, True, 1);
t0 = newTemp(Ity_I64);
break;
case 0x11: /* D */
- DIP("ceil.l.d %d, %d", fd, fs);
+ DIP("ceil.l.d %u, %u", fd, fs);
if (fp_mode64) {
calculateFCSR(fs, 0, CEILLD, False, 1);
putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2),
case 0x16: /* RSQRT.fmt */
switch (fmt) {
case 0x10: { /* S */
- DIP("rsqrt.s %d, %d", fd, fs);
+ DIP("rsqrt.s %u, %u", fd, fs);
IRExpr *rm = get_IR_roundingmode();
putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm,
unop(Iop_ReinterpI32asF32, mkU32(ONE_SINGLE)),
break;
}
case 0x11: { /* D */
- DIP("rsqrt.d %d, %d", fd, fs);
+ DIP("rsqrt.d %u, %u", fd, fs);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, triop(Iop_DivF64, rm,
unop(Iop_ReinterpI64asF64,
break; /* COP1 */
case 0x10: /* COP0 */
if (rs == 0) { /* MFC0 */
- DIP("mfc0 r%d, r%d, %d", rt, rd, sel);
+ DIP("mfc0 r%u, r%u, %u", rt, rd, sel);
IRTemp val = newTemp(Ity_I32);
IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(), mkU32(rd), mkU32(sel));
IRDirty *d = unsafeIRDirty_1_N(val,
putIReg(rt, mkexpr(val));
} else if (rs == 1) {
/* Doubleword Move from Coprocessor 0 - DMFC0; MIPS64 */
- DIP("dmfc0 r%d, r%d, %d", rt, rd, sel);
+ DIP("dmfc0 r%u, r%u, %u", rt, rd, sel);
IRTemp val = newTemp(Ity_I64);
IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(), mkU64(rd), mkU64(sel));
IRDirty *d = unsafeIRDirty_1_N(val,
case 0x31: /* LWC1 */
/* Load Word to Floating Point - LWC1 (MIPS32) */
- DIP("lwc1 f%d, %d(r%d)", ft, imm, rs);
+ DIP("lwc1 f%u, %u(r%u)", ft, imm, rs);
if (fp_mode64) {
t1 = newTemp(Ity_F32);
t2 = newTemp(Ity_I64);
break;
case 0x39: /* SWC1 */
- DIP("swc1 f%d, %d(r%d)", ft, imm, rs);
+ DIP("swc1 f%u, %u(r%u)", ft, imm, rs);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
t2 = newTemp(Ity_I32);
case 0x35:
/* Load Doubleword to Floating Point - LDC1 (MIPS32) */
- DIP("ldc1 f%d, %d(%d)", rt, imm, rs);
+ DIP("ldc1 f%u, %u(%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
putDReg(ft, load(Ity_F64, mkexpr(t1)));
break;
case 0x3D:
/* Store Doubleword from Floating Point - SDC1 */
- DIP("sdc1 f%d, %d(%d)", ft, imm, rs);
+ DIP("sdc1 f%u, %u(%u)", ft, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), getDReg(ft));
break;
case 0x23: /* LW */
- DIP("lw r%d, %d(r%d)", rt, imm, rs);
+ DIP("lw r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), True));
break;
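/* The trailing Bool of mkWidenFrom32 picks the extension used on a
   64-bit guest: LW passes True and sign-extends the loaded word,
   while LWU (case 0x27, later) passes False and zero-extends.  A
   sketch of the two expansions in 64-bit mode, with loaded32 standing
   for the value just loaded:

      putIReg(rt, unop(Iop_32Sto64, loaded32));   -- LW  (True)
      putIReg(rt, unop(Iop_32Uto64, loaded32));   -- LWU (False)
*/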
case 0x20: /* LB */
- DIP("lb r%d, %d(r%d)", rt, imm, rs);
+ DIP("lb r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
if (mode64)
putIReg(rt, unop(Iop_8Sto64, load(Ity_I8, mkexpr(t1))));
break;
case 0x24: /* LBU */
- DIP("lbu r%d, %d(r%d)", rt, imm, rs);
+ DIP("lbu r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
if (mode64)
putIReg(rt, unop(Iop_8Uto64, load(Ity_I8, mkexpr(t1))));
break;
case 0x21: /* LH */
- DIP("lh r%d, %d(r%d)", rt, imm, rs);
+ DIP("lh r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
if (mode64)
putIReg(rt, unop(Iop_16Sto64, load(Ity_I16, mkexpr(t1))));
break;
case 0x25: /* LHU */
- DIP("lhu r%d, %d(r%d)", rt, imm, rs);
+ DIP("lhu r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
if (mode64)
putIReg(rt, unop(Iop_16Uto64, load(Ity_I16, mkexpr(t1))));
case 0x0F: /* LUI */
p = (imm << 16);
- DIP("lui r%d, imm: 0x%x", rt, imm);
+ DIP("lui r%u, imm: 0x%x", rt, imm);
if (mode64)
putIReg(rt, mkU64(extend_s_32to64(p)));
else
switch (function) {
case 0x0: { /* LWXC1 */
/* Load Word Indexed to Floating Point - LWXC1 (MIPS32r2) */
- DIP("lwxc1 f%d, r%d(r%d)", fd, rt, rs);
+ DIP("lwxc1 f%u, r%u(r%u)", fd, rt, rs);
if (fp_mode64) {
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I32);
/* Load Doubleword Indexed to Floating Point
LDXC1 (MIPS32r2 and MIPS64) */
if (fp_mode64) {
- DIP("ldxc1 f%d, r%d(r%d)", fd, rt, rs);
+ DIP("ldxc1 f%u, r%u(r%u)", fd, rt, rs);
t0 = newTemp(ty);
assign(t0, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
getIReg(rt)));
case 0x5: /* Load Doubleword Indexed Unaligned to Floating Point - LUXC1;
MIPS32r2 */
- DIP("luxc1 f%d, r%d(r%d)", fd, rt, rs);
+ DIP("luxc1 f%u, r%u(r%u)", fd, rt, rs);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
break;
case 0x8: { /* Store Word Indexed from Floating Point - SWXC1 */
- DIP("swxc1 f%d, r%d(r%d)", ft, rt, rs);
+ DIP("swxc1 f%u, r%u(r%u)", ft, rt, rs);
if (fp_mode64) {
t0 = newTemp(ty);
assign(t0, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
break;
}
case 0x9: { /* Store Doubleword Indexed from Floating Point - SDXC1 */
- DIP("sdc1 f%d, %d(%d)", ft, imm, rs);
+ DIP("sdc1 f%u, %u(%u)", ft, imm, rs);
if (fp_mode64) {
t0 = newTemp(ty);
assign(t0, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
}
case 0xD: /* Store Doubleword Indexed Unaligned from Floating Point -
SUXC1; MIPS64, MIPS32r2 */
- DIP("suxc1 f%d, r%d(r%d)", fd, rt, rs);
+ DIP("suxc1 f%u, r%u(r%u)", fd, rt, rs);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I64);
assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
break;
}
case 0x20: { /* MADD.S */
- DIP("madd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("madd.s f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
assign(t1, qop(Iop_MAddF32, rm,
break; /* MADD.S */
}
case 0x21: { /* MADD.D */
- DIP("madd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("madd.d f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs),
getDReg(ft)));
break; /* MADD.D */
}
case 0x28: { /* MSUB.S */
- DIP("msub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("msub.s f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
assign(t1, qop(Iop_MSubF32, rm,
break; /* MSUB.S */
}
case 0x29: { /* MSUB.D */
- DIP("msub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("msub.d f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
putDReg(fd, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs),
getDReg(ft)));
break; /* MSUB.D */
}
case 0x30: { /* NMADD.S */
- DIP("nmadd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("nmadd.s f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
assign(t1, qop(Iop_MAddF32, rm,
break; /* NMADD.S */
}
case 0x31: { /* NMADD.D */
- DIP("nmadd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("nmadd.d f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F64);
assign(t1, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs),
break; /* NMADD.D */
}
case 0x38: { /* NMSUB.S */
- DIP("nmsub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("nmsub.s f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F32);
assign(t1, qop(Iop_MSubF32, rm,
break; /* NMSUB.S */
}
case 0x39: { /* NMSUB.D */
- DIP("nmsub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+ DIP("nmsub.d f%u, f%u, f%u, f%u", fd, fmt, fs, ft);
IRExpr *rm = get_IR_roundingmode();
t1 = newTemp(Ity_F64);
assign(t1, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs),
break;
case 0x22: /* LWL */
- DIP("lwl r%d, %d(r%d)", rt, imm, rs);
+ DIP("lwl r%u, %u(r%u)", rt, imm, rs);
if (mode64) {
/* t1 = addr */
t1 = newTemp(Ity_I64);
break;
case 0x26: /* LWR */
- DIP("lwr r%d, %d(r%d)", rt, imm, rs);
+ DIP("lwr r%u, %u(r%u)", rt, imm, rs);
if (mode64) {
/* t1 = addr */
t1 = newTemp(Ity_I64);
break;
case 0x2B: /* SW */
- DIP("sw r%d, %d(r%d)", rt, imm, rs);
+ DIP("sw r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), mkNarrowTo32(ty, getIReg(rt)));
break;
case 0x2C: { /* SDL rt, offset(base) MIPS64 */
- DIP("sdl r%u, %d(r%u)", rt, (Int) imm, rs);
+ DIP("sdl r%u, %u(r%u)", rt, imm, rs);
vassert(mode64);
IRTemp A_byte = newTemp(Ity_I8);
IRTemp B_byte = newTemp(Ity_I8);
case 0x2D: {
/* SDR rt, offset(base) - MIPS64 */
vassert(mode64);
- DIP("sdr r%u, %d(r%u)", rt, imm, rs);
+ DIP("sdr r%u, %u(r%u)", rt, imm, rs);
IRTemp A_byte = newTemp(Ity_I8);
IRTemp B_byte = newTemp(Ity_I8);
IRTemp C_byte = newTemp(Ity_I8);
}
case 0x28: /* SB */
- DIP("sb r%d, %d(r%d)", rt, imm, rs);
+ DIP("sb r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), narrowTo(Ity_I8, getIReg(rt)));
break;
case 0x29: /* SH */
- DIP("sh r%d, %d(r%d)", rt, imm, rs);
+ DIP("sh r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), narrowTo(Ity_I16, getIReg(rt)));
break;
case 0x2A: /* SWL */
- DIP("swl r%d, %d(r%d)", rt, imm, rs);
+ DIP("swl r%u, %u(r%u)", rt, imm, rs);
if (mode64) {
IRTemp E_byte = newTemp(Ity_I8);
IRTemp F_byte = newTemp(Ity_I8);
break;
case 0x2E: /* SWR */
- DIP("swr r%d, %d(r%d)", rt, imm, rs);
+ DIP("swr r%u, %u(r%u)", rt, imm, rs);
if (mode64) {
IRTemp E_byte = newTemp(Ity_I8);
IRTemp F_byte = newTemp(Ity_I8);
break;
case 0x02: { /* MUL */
- DIP("mul r%d, r%d, r%d", rd, rs, rt);
+ DIP("mul r%u, r%u, r%u", rd, rs, rt);
if (mode64) {
IRTemp tmpRs32 = newTemp(Ity_I32);
IRTemp tmpRt32 = newTemp(Ity_I32);
case 0x00: { /* MADD */
if (mode64) {
- DIP("madd r%d, r%d", rs, rt);
+ DIP("madd r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
goto decode_failure_dsp;
}
} else {
- DIP("madd r%d, r%d", rs, rt);
+ DIP("madd r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
case 0x01: { /* MADDU */
if (mode64) {
- DIP("maddu r%d, r%d", rs, rt);
+ DIP("maddu r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
goto decode_failure_dsp;
}
} else {
- DIP("maddu r%d, r%d", rs, rt);
+ DIP("maddu r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
case 0x04: { /* MSUB */
if (mode64) {
- DIP("msub r%d, r%d", rs, rt);
+ DIP("msub r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
goto decode_failure_dsp;
}
} else {
- DIP("msub r%d, r%d", rs, rt);
+ DIP("msub r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
case 0x05: { /* MSUBU */
if (mode64) {
- DIP("msubu r%d, r%d", rs, rt);
+ DIP("msubu r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
goto decode_failure_dsp;
}
} else {
- DIP("msubu r%d, r%d", rs, rt);
+ DIP("msubu r%u, r%u", rs, rt);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I64);
break;
case 0x20: { /* CLZ */
- DIP("clz r%d, r%d", rd, rs);
+ DIP("clz r%u, r%u", rd, rs);
if (mode64) {
IRTemp tmpClz32 = newTemp(Ity_I32);
IRTemp tmpRs32 = newTemp(Ity_I32);
}
case 0x21: { /* CLO */
- DIP("clo r%d, r%d", rd, rs);
+ DIP("clo r%u, r%u", rd, rs);
if (mode64) {
IRTemp tmpClo32 = newTemp(Ity_I32);
IRTemp tmpRs32 = newTemp(Ity_I32);
}
case 0x24: /* Count Leading Zeros in Doubleword - DCLZ; MIPS64 */
- DIP("dclz r%d, r%d", rd, rs);
+ DIP("dclz r%u, r%u", rd, rs);
t1 = newTemp(Ity_I1);
assign(t1, binop(Iop_CmpEQ64, getIReg(rs), mkU64(0)));
putIReg(rd, IRExpr_ITE(mkexpr(t1),
break;
case 0x25: /* Count Leading Ones in Doubleword - DCLO; MIPS64 */
- DIP("dclo r%d, r%d", rd, rs);
+ DIP("dclo r%u, r%u", rd, rs);
t1 = newTemp(Ity_I1);
assign(t1, binop(Iop_CmpEQ64, getIReg(rs),
mkU64(0xffffffffffffffffULL)));
UInt srcPos = lsb;
UInt dstSz = msb + 33;
t1 = newTemp(Ity_I64);
- DIP("dextm r%u, r%u, %d, %d", rt, rs, lsb, msb + 1);
+ DIP("dextm r%u, r%u, %u, %u", rt, rs, lsb, msb + 1);
UChar lsAmt = 64 - (srcPos + dstSz); /* left shift amount; */
UChar rsAmt = 64 - dstSz; /* right shift amount; */
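/* What the two shift amounts implement, sketched with ordinary
   unsigned (logical) shifts: DEXTM extracts the dstSz-bit field that
   starts at bit srcPos by first pushing its top bit up to bit 63,
   then shifting the whole value back down so the field lands
   zero-extended at bit 0:

      result = (rs << (64 - (srcPos + dstSz))) >> (64 - dstSz);
*/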
size = msb + 1;
UInt srcPos = lsb + 32;
UInt dstSz = msb + 1;
- DIP("dextu r%u, r%u, %d, %d", rt, rs, srcPos, dstSz);
+ DIP("dextu r%u, r%u, %u, %u", rt, rs, srcPos, dstSz);
t1 = newTemp(Ity_I64);
vassert(srcPos >= 32 && srcPos < 64);
assign(tmpRs, getIReg(rs));
assign(tmpRt, getIReg(rt));
- DIP("dinsm r%u, r%u, %d, %d", rt, rs, lsb, msb);
+ DIP("dinsm r%u, r%u, %u, %u", rt, rs, lsb, msb);
UChar lsAmt = dstPos + srcSz - 1; /* left shift amount; */
UChar rsAmt = dstPos + srcSz - 1; /* right shift amount; */
assign(tmpRs, getIReg(rs));
assign(tmpRt, getIReg(rt));
- DIP("dinsu r%u, r%u, %d, %d", rt, rs, lsb, msb);
+ DIP("dinsu r%u, r%u, %u, %u", rt, rs, lsb, msb);
UChar lsAmt = 64 - srcSz; /* left shift amount; */
UChar rsAmt = 64 - (dstPos + srcSz); /* right shift amount; */
msb = get_msb(cins);
lsb = get_lsb(cins);
size = msb + 1;
- DIP("dins r%u, r%u, %d, %d", rt, rs, lsb,
+ DIP("dins r%u, r%u, %u, %u", rt, rs, lsb,
msb - lsb + 1);
UChar lsAmt = 63 - lsb; /* left shift amount; */
UChar rsAmt = 63 - lsb; /* right shift amount; */
break;
}
default:
- vex_printf("\nop6o10 = %d", lsb);
+ vex_printf("\nop6o10 = %u", lsb);
goto decode_failure;
}
break;
case 0x3B: { /* RDHWR */
- DIP("rdhwr r%d, r%d", rt, rd);
+ DIP("rdhwr r%u, r%u", rt, rd);
if (rd == 29) {
putIReg(rt, getULR());
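/* Hardware register 29 is the UserLocal register (commonly used for
   thread-local storage); Valgrind models it with the ULR guest-state
   field, which getULR() reads. */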
#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
msb = get_msb(cins);
lsb = get_lsb(cins);
size = msb - lsb + 1;
- DIP("ins size:%d msb:%d lsb:%d", size, msb, lsb);
+ DIP("ins size:%u msb:%u lsb:%u", size, msb, lsb);
vassert(lsb + size <= 32);
vassert(lsb + size > 0);
msb = get_msb(cins);
lsb = get_lsb(cins);
size = msb + 1;
- DIP("ext size:%d msb:%d lsb:%d", size, msb, lsb);
+ DIP("ext size:%u msb:%u lsb:%u", size, msb, lsb);
vassert(lsb + size <= 32);
vassert(lsb + size > 0);
/* put size bits from rs at the top of a temporary */
msb = get_msb(cins);
lsb = get_lsb(cins);
size = msb + 1;
- DIP("dext r%u, r%u, %d, %d", rt, rs, lsb, msb + 1);
+ DIP("dext r%u, r%u, %u, %u", rt, rs, lsb, msb + 1);
t1 = newTemp(Ity_I64);
vassert(lsb >= 0 && lsb < 32);
vassert(size > 0 && size <= 32);
case 0x20: /* BSHFL */
switch (sa) {
case 0x02: /* WSBH */
- DIP("wsbh r%d, r%d", rd, rt);
+ DIP("wsbh r%u, r%u", rd, rt);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
t2 = newTemp(Ity_I32);
break;
case 0x10: /* SEB */
- DIP("seb r%d, r%d", rd, rt);
+ DIP("seb r%u, r%u", rd, rt);
if (mode64)
putIReg(rd, unop(Iop_8Sto64, unop(Iop_64to8, getIReg(rt))));
else
break;
case 0x18: /* SEH */
- DIP("seh r%d, r%d", rd, rt);
+ DIP("seh r%u, r%u", rd, rt);
if (mode64)
putIReg(rd, unop(Iop_16Sto64, unop(Iop_64to16, getIReg(rt))));
else
if (0x3B == function &&
(VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_BROADCOM)) {
/*RDHWR*/
- DIP("rdhwr r%d, r%d", rt, rd);
+ DIP("rdhwr r%u, r%u", rt, rd);
if (rd == 29) {
putIReg(rt, getULR());
} else
case 0x1: {
UInt mov_cc = get_mov_cc(cins);
if (tf == 0) { /* MOVF */
- DIP("movf r%d, r%d, %d", rd, rs, mov_cc);
+ DIP("movf r%u, r%u, %u", rd, rs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
} else if (tf == 1) { /* MOVT */
- DIP("movt r%d, r%d, %d", rd, rs, mov_cc);
+ DIP("movt r%u, r%u, %u", rd, rs, mov_cc);
t1 = newTemp(Ity_I1);
t2 = newTemp(Ity_I32);
t3 = newTemp(Ity_I1);
break;
}
case 0x0A: { /* MOVZ */
- DIP("movz r%d, r%d, r%d", rd, rs, rt);
+ DIP("movz r%u, r%u, r%u", rd, rs, rt);
t1 = newTemp(ty);
t2 = newTemp(ty);
if (mode64) {
}
case 0x0B: { /* MOVN */
- DIP("movn r%d, r%d, r%d", rd, rs, rt);
+ DIP("movn r%u, r%u, r%u", rd, rs, rt);
t1 = newTemp(ty);
t2 = newTemp(ty);
if (mode64) {
goto decode_failure_dsp;
}
} else {
- DIP("mult r%d, r%d", rs, rt);
+ DIP("mult r%u, r%u", rs, rt);
t2 = newTemp(Ity_I64);
assign(t2, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
goto decode_failure_dsp;
}
} else {
- DIP("multu r%d, r%d", rs, rt);
+ DIP("multu r%u, r%u", rs, rt);
t2 = newTemp(Ity_I64);
assign(t2, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
}
}
case 0x20: { /* ADD */
- DIP("add r%d, r%d, r%d", rd, rs, rt);
+ DIP("add r%u, r%u, r%u", rd, rs, rt);
IRTemp tmpRs32 = newTemp(Ity_I32);
IRTemp tmpRt32 = newTemp(Ity_I32);
break;
}
case 0x1A: /* DIV */
- DIP("div r%d, r%d", rs, rt);
+ DIP("div r%u, r%u", rs, rt);
if (mode64) {
t2 = newTemp(Ity_I64);
break;
case 0x1B: /* DIVU */
- DIP("divu r%d, r%d", rs, rt);
+ DIP("divu r%u, r%u", rs, rt);
if (mode64) {
t2 = newTemp(Ity_I64);
}
break;
} else {
- DIP("mfhi r%d", rd);
+ DIP("mfhi r%u", rd);
putIReg(rd, getHI());
break;
}
}
break;
} else {
- DIP("mthi r%d", rs);
+ DIP("mthi r%u", rs);
putHI(getIReg(rs));
break;
}
}
break;
} else {
- DIP("mflo r%d", rd);
+ DIP("mflo r%u", rd);
putIReg(rd, getLO());
break;
}
}
break;
} else {
- DIP("mtlo r%d", rs);
+ DIP("mtlo r%u", rs);
putLO(getIReg(rs));
break;
}
}
case 0x21: /* ADDU */
- DIP("addu r%d, r%d, r%d", rd, rs, rt);
+ DIP("addu r%u, r%u, r%u", rd, rs, rt);
if (mode64) {
ALU_PATTERN64(Iop_Add32);
} else {
break;
case 0x22: { /* SUB */
- DIP("sub r%d, r%d, r%d", rd, rs, rt);
+ DIP("sub r%u, r%u, r%u", rd, rs, rt);
IRTemp tmpRs32 = newTemp(Ity_I32);
IRTemp tmpRt32 = newTemp(Ity_I32);
break;
}
case 0x23: /* SUBU */
- DIP("subu r%d, r%d, r%d", rd, rs, rt);
+ DIP("subu r%u, r%u, r%u", rd, rs, rt);
if (mode64) {
ALU_PATTERN64(Iop_Sub32);
} else {
break;
case 0x24: /* AND */
- DIP("and r%d, r%d, r%d", rd, rs, rt);
+ DIP("and r%u, r%u, r%u", rd, rs, rt);
if (mode64) {
ALU_PATTERN(Iop_And64);
} else {
break;
case 0x25: /* OR */
- DIP("or r%d, r%d, r%d", rd, rs, rt);
+ DIP("or r%u, r%u, r%u", rd, rs, rt);
if (mode64) {
ALU_PATTERN(Iop_Or64);
} else {
break;
case 0x26: /* XOR */
- DIP("xor r%d, r%d, r%d", rd, rs, rt);
+ DIP("xor r%u, r%u, r%u", rd, rs, rt);
if (mode64) {
ALU_PATTERN(Iop_Xor64);
} else {
break;
case 0x27: /* NOR */
- DIP("nor r%d, r%d, r%d", rd, rs, rt);
+ DIP("nor r%u, r%u, r%u", rd, rs, rt);
if (mode64)
putIReg(rd, unop(Iop_Not64, binop(Iop_Or64, getIReg(rs),
getIReg(rt))));
break;
case 0x08: /* JR */
- DIP("jr r%d", rs);
+ DIP("jr r%u", rs);
t0 = newTemp(ty);
assign(t0, getIReg(rs));
lastn = mkexpr(t0);
break;
case 0x09: /* JALR */
- DIP("jalr r%d r%d", rd, rs);
+ DIP("jalr r%u r%u", rd, rs);
if (mode64) {
putIReg(rd, mkU64(guest_PC_curr_instr + 8));
t0 = newTemp(Ity_I64);
break;
case 0x2A: /* SLT */
- DIP("slt r%d, r%d, r%d", rd, rs, rt);
+ DIP("slt r%u, r%u, r%u", rd, rs, rt);
if (mode64)
putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs),
getIReg(rt))));
break;
case 0x2B: /* SLTU */
- DIP("sltu r%d, r%d, r%d", rd, rs, rt);
+ DIP("sltu r%u, r%u, r%u", rd, rs, rt);
if (mode64)
putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs),
getIReg(rt))));
break;
case 0x00: { /* SLL */
- DIP("sll r%d, r%d, %d", rd, rt, sa);
+ DIP("sll r%u, r%u, %u", rd, rt, sa);
IRTemp tmpRt32 = newTemp(Ity_I32);
IRTemp tmpSh32 = newTemp(Ity_I32);
IRTemp tmpRd = newTemp(Ity_I64);
}
case 0x04: { /* SLLV */
- DIP("sllv r%d, r%d, r%d", rd, rt, rs);
+ DIP("sllv r%u, r%u, r%u", rd, rt, rs);
if (mode64) {
IRTemp tmpRs8 = newTemp(Ity_I8);
IRTemp tmpRt32 = newTemp(Ity_I32);
}
case 0x03: /* SRA */
- DIP("sra r%d, r%d, %d", rd, rt, sa);
+ DIP("sra r%u, r%u, %u", rd, rt, sa);
if (mode64) {
IRTemp tmpRt32 = newTemp(Ity_I32);
IRTemp tmpSh32 = newTemp(Ity_I32);
break;
case 0x07: /* SRAV */
- DIP("srav r%d, r%d, r%d", rd, rt, rs);
+ DIP("srav r%u, r%u, r%u", rd, rt, rs);
if (mode64) {
IRTemp tmpRt32 = newTemp(Ity_I32);
IRTemp tmpSh32 = newTemp(Ity_I32);
case 0x02: { /* SRL */
rot = get_rot(cins);
if (rot) {
- DIP("rotr r%d, r%d, %d", rd, rt, sa);
+ DIP("rotr r%u, r%u, %u", rd, rt, sa);
putIReg(rd, mkWidenFrom32(ty, genROR32(mkNarrowTo32(ty,
getIReg(rt)), sa), True));
} else {
- DIP("srl r%d, r%d, %d", rd, rt, sa);
+ DIP("srl r%u, r%u, %u", rd, rt, sa);
if (mode64) {
IRTemp tmpSh32 = newTemp(Ity_I32);
IRTemp tmpRt32 = newTemp(Ity_I32);
case 0x06: {
rot = get_rotv(cins);
if (rot) {
- DIP("rotrv r%d, r%d, r%d", rd, rt, rs);
+ DIP("rotrv r%u, r%u, r%u", rd, rt, rs);
putIReg(rd, mkWidenFrom32(ty, genRORV32(mkNarrowTo32(ty,
getIReg(rt)), mkNarrowTo32(ty, getIReg(rs))), True));
break;
} else { /* SRLV */
- DIP("srlv r%d, r%d, r%d", rd, rt, rs);
+ DIP("srlv r%u, r%u, r%u", rd, rt, rs);
if (mode64) {
SXXV_PATTERN64(Iop_Shr32);
} else {
break;
case 0x30: { /* TGE */
- DIP("tge r%d, r%d %d", rs, rt, trap_code);
+ DIP("tge r%u, r%u %u", rs, rt, trap_code);
if (mode64) {
if (trap_code == 7)
stmt (IRStmt_Exit (unop (Iop_Not1,
break;
}
case 0x31: { /* TGEU */
- DIP("tgeu r%d, r%d %d", rs, rt, trap_code);
+ DIP("tgeu r%u, r%u %u", rs, rt, trap_code);
if (mode64) {
if (trap_code == 7)
stmt (IRStmt_Exit (unop (Iop_Not1,
break;
}
case 0x32: { /* TLT */
- DIP("tlt r%d, r%d %d", rs, rt, trap_code);
+ DIP("tlt r%u, r%u %u", rs, rt, trap_code);
if (mode64) {
if (trap_code == 7)
stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
break;
}
case 0x33: { /* TLTU */
- DIP("tltu r%d, r%d %d", rs, rt, trap_code);
+ DIP("tltu r%u, r%u %u", rs, rt, trap_code);
if (mode64) {
if (trap_code == 7)
stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
break;
}
case 0x34: { /* TEQ */
- DIP("teq r%d, r%d, %d", rs, rt, trap_code);
+ DIP("teq r%u, r%u, %u", rs, rt, trap_code);
if (mode64) {
if (trap_code == 7)
stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
break;
}
case 0x36: { /* TNE */
- DIP("tne r%d, r%d %d", rs, rt, trap_code);
+ DIP("tne r%u, r%u %u", rs, rt, trap_code);
if (mode64) {
if (trap_code == 7)
stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
break;
case 0x2C: { /* Doubleword Add - DADD; MIPS64 */
- DIP("dadd r%d, r%d, r%d", rd, rs, rt);
+ DIP("dadd r%u, r%u, r%u", rd, rs, rt);
IRTemp tmpRs64 = newTemp(Ity_I64);
IRTemp tmpRt64 = newTemp(Ity_I64);
}
case 0x2D: /* Doubleword Add Unsigned - DADDU; MIPS64 */
- DIP("daddu r%d, r%d, r%d", rd, rs, rt);
+ DIP("daddu r%u, r%u, r%u", rd, rs, rt);
ALU_PATTERN(Iop_Add64);
break;
switch (rt) {
case 0x00: /* BLTZ */
- DIP("bltz r%d, %d", rs, imm);
+ DIP("bltz r%u, %u", rs, imm);
if (mode64) {
if (!dis_instr_branch(cins, &dres, resteerOkFn,
callback_opaque, &bstmt))
break;
case 0x01: /* BGEZ */
- DIP("bgez r%d, %d", rs, imm);
+ DIP("bgez r%u, %u", rs, imm);
if (mode64) {
if (!dis_instr_branch(cins, &dres, resteerOkFn,
callback_opaque, &bstmt))
break;
case 0x02: /* BLTZL */
- DIP("bltzl r%d, %d", rs, imm);
+ DIP("bltzl r%u, %u", rs, imm);
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
break;
case 0x03: /* BGEZL */
- DIP("bgezl r%d, %d", rs, imm);
+ DIP("bgezl r%u, %u", rs, imm);
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
break;
case 0x10: /* BLTZAL */
- DIP("bltzal r%d, %d", rs, imm);
+ DIP("bltzal r%u, %u", rs, imm);
if (mode64) {
if (!dis_instr_branch(cins, &dres, resteerOkFn,
callback_opaque, &bstmt))
break;
case 0x12: /* BLTZALL */
- DIP("bltzall r%d, %d", rs, imm);
+ DIP("bltzall r%u, %u", rs, imm);
putIReg(31, mode64 ? mkU64(guest_PC_curr_instr + 8) :
mkU32(guest_PC_curr_instr + 8));
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
break;
case 0x11: /* BGEZAL */
- DIP("bgezal r%d, %d", rs, imm);
+ DIP("bgezal r%u, %u", rs, imm);
if (mode64) {
if (!dis_instr_branch(cins, &dres, resteerOkFn,
callback_opaque, &bstmt))
break;
case 0x13: /* BGEZALL */
- DIP("bgezall r%d, %d", rs, imm);
+ DIP("bgezall r%u, %u", rs, imm);
if (mode64) {
putIReg(31, mkU64(guest_PC_curr_instr + 8));
lastn = dis_branch_likely(binop(Iop_CmpNE64,
break;
case 0x08: /* TGEI */
- DIP("tgei r%d, %d %d", rs, imm, trap_code);
+ DIP("tgei r%u, %u %u", rs, imm, trap_code);
if (mode64) {
stmt (IRStmt_Exit (unop (Iop_Not1,
binop (Iop_CmpLT64S,
break;
case 0x09: { /* TGEIU */
- DIP("tgeiu r%d, %d %d", rs, imm, trap_code);
+ DIP("tgeiu r%u, %u %u", rs, imm, trap_code);
if (mode64) {
stmt (IRStmt_Exit (unop (Iop_Not1,
binop (Iop_CmpLT64U,
break;
}
case 0x0A: { /* TLTI */
- DIP("tlti r%d, %d %d", rs, imm, trap_code);
+ DIP("tlti r%u, %u %u", rs, imm, trap_code);
if (mode64) {
stmt (IRStmt_Exit (binop (Iop_CmpLT64S, getIReg (rs),
mkU64 (extend_s_16to64 (imm))),
break;
}
case 0x0B: { /* TLTIU */
- DIP("tltiu r%d, %d %d", rs, imm, trap_code);
+ DIP("tltiu r%u, %u %u", rs, imm, trap_code);
if (mode64) {
stmt (IRStmt_Exit (binop (Iop_CmpLT64U, getIReg (rs),
mkU64 (extend_s_16to64 (imm))),
break;
}
case 0x0C: { /* TEQI */
- DIP("teqi r%d, %d %d", rs, imm, trap_code);
+ DIP("teqi r%u, %u %u", rs, imm, trap_code);
if (mode64) {
stmt (IRStmt_Exit (binop (Iop_CmpEQ64, getIReg (rs),
mkU64 (extend_s_16to64 (imm))),
break;
}
case 0x0E: { /* TNEI */
- DIP("tnei r%d, %d %d", rs, imm, trap_code);
+ DIP("tnei r%u, %u %u", rs, imm, trap_code);
if (mode64) {
stmt (IRStmt_Exit (binop (Iop_CmpNE64, getIReg (rs),
mkU64 (extend_s_16to64 (imm))),
break;
}
case 0x1C: { /* BPOSGE32 */
- DIP("bposge32 %d", imm);
+ DIP("bposge32 %u", imm);
vassert(!mode64);
t0 = newTemp(Ity_I32);
/* Get pos field from DSPControl register. */
break;
case 0x04:
- DIP("beq r%d, r%d, %d", rs, rt, imm);
+ DIP("beq r%u, r%u, %u", rs, rt, imm);
if (mode64)
dis_branch(False, binop(Iop_CmpEQ64, getIReg(rs), getIReg(rt)),
imm, &bstmt);
break;
case 0x14:
- DIP("beql r%d, r%d, %d", rs, rt, imm);
+ DIP("beql r%u, r%u, %u", rs, rt, imm);
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
getIReg(rs), getIReg(rt)), imm);
break;
case 0x05:
- DIP("bne r%d, r%d, %d", rs, rt, imm);
+ DIP("bne r%u, r%u, %u", rs, rt, imm);
if (mode64)
dis_branch(False, binop(Iop_CmpNE64, getIReg(rs), getIReg(rt)),
imm, &bstmt);
break;
case 0x15:
- DIP("bnel r%d, r%d, %d", rs, rt, imm);
+ DIP("bnel r%u, r%u, %u", rs, rt, imm);
lastn = dis_branch_likely(binop(mode64 ? Iop_CmpEQ64 : Iop_CmpEQ32,
getIReg(rs), getIReg(rt)), imm);
break;
case 0x07: /* BGTZ */
- DIP("bgtz r%d, %d", rs, imm);
+ DIP("bgtz r%u, %u", rs, imm);
if (mode64)
dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE64S, getIReg(rs),
mkU64(0x00))), imm, &bstmt);
break;
case 0x17: /* BGTZL */
- DIP("bgtzl r%d, %d", rs, imm);
+ DIP("bgtzl r%u, %u", rs, imm);
if (mode64)
lastn = dis_branch_likely(binop(Iop_CmpLE64S, getIReg(rs),
mkU64(0x00)), imm);
break;
case 0x06: /* BLEZ */
- DIP("blez r%d, %d", rs, imm);
+ DIP("blez r%u, %u", rs, imm);
if (mode64)
dis_branch(False, binop(Iop_CmpLE64S, getIReg(rs), mkU64(0x0)),
imm, &bstmt);
break;
case 0x16: /* BLEZL */
- DIP("blezl r%d, %d", rs, imm);
+ DIP("blezl r%u, %u", rs, imm);
lastn = dis_branch_likely(unop(Iop_Not1, (binop(mode64 ? Iop_CmpLE64S :
Iop_CmpLE32S, getIReg(rs), mode64 ?
mkU64(0x0) : mkU32(0x0)))), imm);
break;
case 0x08: { /* ADDI */
- DIP("addi r%d, r%d, %d", rt, rs, imm);
+ DIP("addi r%u, r%u, %u", rt, rs, imm);
IRTemp tmpRs32 = newTemp(Ity_I32);
assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
break;
}
case 0x09: /* ADDIU */
- DIP("addiu r%d, r%d, %d", rt, rs, imm);
+ DIP("addiu r%u, r%u, %u", rt, rs, imm);
if (mode64) {
putIReg(rt, mkWidenFrom32(ty, binop(Iop_Add32,
mkNarrowTo32(ty, getIReg(rs)),mkU32(extend_s_16to32(imm))),
break;
case 0x0C: /* ANDI */
- DIP("andi r%d, r%d, %d", rt, rs, imm);
+ DIP("andi r%u, r%u, %u", rt, rs, imm);
if (mode64) {
ALUI_PATTERN64(Iop_And64);
} else {
break;
case 0x0E: /* XORI */
- DIP("xori r%d, r%d, %d", rt, rs, imm);
+ DIP("xori r%u, r%u, %u", rt, rs, imm);
if (mode64) {
ALUI_PATTERN64(Iop_Xor64);
} else {
break;
case 0x0D: /* ORI */
- DIP("ori r%d, r%d, %d", rt, rs, imm);
+ DIP("ori r%u, r%u, %u", rt, rs, imm);
if (mode64) {
ALUI_PATTERN64(Iop_Or64);
} else {
break;
case 0x0A: /* SLTI */
- DIP("slti r%d, r%d, %d", rt, rs, imm);
+ DIP("slti r%u, r%u, %u", rt, rs, imm);
if (mode64)
putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs),
mkU64(extend_s_16to64(imm)))));
break;
case 0x0B: /* SLTIU */
- DIP("sltiu r%d, r%d, %d", rt, rs, imm);
+ DIP("sltiu r%u, r%u, %u", rt, rs, imm);
if (mode64)
putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs),
mkU64(extend_s_16to64(imm)))));
break;
case 0x18: { /* Doubleword Add Immediate - DADDI; MIPS64 */
- DIP("daddi r%d, r%d, %d", rt, rs, imm);
+ DIP("daddi r%u, r%u, %u", rt, rs, imm);
IRTemp tmpRs64 = newTemp(Ity_I64);
assign(tmpRs64, getIReg(rs));
}
case 0x19: /* Doubleword Add Immediate Unsigned - DADDIU; MIPS64 */
- DIP("daddiu r%d, r%d, %d", rt, rs, imm);
+ DIP("daddiu r%u, r%u, %u", rt, rs, imm);
putIReg(rt, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
break;
case 0x1A: {
/* Load Doubleword Left - LDL; MIPS64 */
vassert(mode64);
- DIP("ldl r%u, %d(r%u)", rt, imm, rs);
+ DIP("ldl r%u, %u(r%u)", rt, imm, rs);
/* t1 = addr */
#if defined (_MIPSEL)
t1 = newTemp(Ity_I64);
case 0x1B: {
/* Load Doubleword Right - LDR; MIPS64 */
vassert(mode64);
- DIP("ldr r%u,%d(r%u)", rt, imm, rs);
+ DIP("ldr r%u,%u(r%u)", rt, imm, rs);
/* t1 = addr */
#if defined (_MIPSEL)
t1 = newTemp(Ity_I64);
}
case 0x27: /* Load Word unsigned - LWU; MIPS64 */
- DIP("lwu r%u,%d(r%u)", rt, imm, rs);
+ DIP("lwu r%u,%u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), False));
break;
case 0x30: /* LL / LWC0 */
- DIP("ll r%d, %d(r%d)", rt, imm, rs);
+ DIP("ll r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
t2 = newTemp(Ity_I32);
break;
case 0x34: /* Load Linked Doubleword - LLD; MIPS64 */
- DIP("lld r%d, %d(r%d)", rt, imm, rs);
+ DIP("lld r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
t2 = newTemp(Ity_I64);
break;
case 0x38: /* SC / SWC0 */
- DIP("sc r%d, %d(r%d)", rt, imm, rs);
+ DIP("sc r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
t2 = newTemp(Ity_I1);
break;
case 0x3C: /* Store Conditional Doubleword - SCD; MIPS64 */
- DIP("sdc r%d, %d(r%d)", rt, imm, rs);
+ DIP("sdc r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
t2 = newTemp(Ity_I1);
break;
case 0x37: /* Load Doubleword - LD; MIPS64 */
- DIP("ld r%u, %d(r%u)", rt, imm, rs);
+ DIP("ld r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
putIReg(rt, load(Ity_I64, mkexpr(t1)));
break;
case 0x3F: /* Store Doubleword - SD; MIPS64 */
- DIP("sd r%u, %d(r%u)", rt, imm, rs);
+ DIP("sd r%u, %u(r%u)", rt, imm, rs);
LOAD_STORE_PATTERN;
store(mkexpr(t1), getIReg(rt));
break;
case 0x32: /* Branch on Bit Clear - BBIT0; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
- DIP("bbit0 r%d, 0x%x, %x", rs, rt, imm);
+ DIP("bbit0 r%u, 0x%x, %x", rs, rt, imm);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
assign(t0, mkU32(0x1));
case 0x36: /* Branch on Bit Clear Plus 32 - BBIT032; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
- DIP("bbit032 r%d, 0x%x, %x", rs, rt, imm);
+ DIP("bbit032 r%u, 0x%x, %x", rs, rt, imm);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I8); /* Shift. */
t2 = newTemp(Ity_I64);
case 0x3A: /* Branch on Bit Set - BBIT1; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
- DIP("bbit1 r%d, 0x%x, %x", rs, rt, imm);
+ DIP("bbit1 r%u, 0x%x, %x", rs, rt, imm);
t0 = newTemp(Ity_I32);
t1 = newTemp(Ity_I32);
assign(t0, mkU32(0x1));
case 0x3E: /* Branch on Bit Set Plus 32 - BBIT132; Cavium OCTEON */
/* Cavium Specific instructions. */
if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
- DIP("bbit132 r%d, 0x%x, %x", rs, rt, imm);
+ DIP("bbit132 r%u, 0x%x, %x", rs, rt, imm);
t0 = newTemp(Ity_I64);
t1 = newTemp(Ity_I8); /* Shift. */
t2 = newTemp(Ity_I64);
if (sigill_diag)
vex_printf("vex mips->IR: unhandled instruction bytes: "
"0x%x 0x%x 0x%x 0x%x\n",
- (Int) getIByte(delta_start + 0),
- (Int) getIByte(delta_start + 1),
- (Int) getIByte(delta_start + 2),
- (Int) getIByte(delta_start + 3));
+ (UInt) getIByte(delta_start + 0),
+ (UInt) getIByte(delta_start + 1),
+ (UInt) getIByte(delta_start + 2),
+ (UInt) getIByte(delta_start + 3));
/* Tell the dispatcher that this insn cannot be decoded, and so has
not been executed, and (is currently) the next to be executed.
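
Throughout this patch the same two idioms silence -Wformat-signedness: either the conversion is changed to match an unsigned argument (%d becomes %u), or a narrow value is explicitly widened so the checker sees an unsigned int. A minimal sketch of both idioms, using hypothetical values rather than code from the patch:

   /* Sketch only: the two -Wformat-signedness idioms used in this patch.
      'opc_aux' and 'imm' are hypothetical stand-ins. */
   UChar opc_aux = 0x05;
   UInt  imm     = 42;
   vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)opc_aux); /* widen for %x */
   vex_printf("imm = %u\n", imm);                            /* %u, not %d */
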
DIP("lis r%u,%d\n", rD_addr, (Int)simm16);
assign( rD, mkSzExtendS32(ty, uimm16 << 16) );
} else {
- DIP("addis r%u,r%u,0x%x\n", rD_addr, rA_addr, (Int)simm16);
+ DIP("addis r%u,r%u,0x%x\n", rD_addr, rA_addr, (UInt)simm16);
assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
mkSzExtendS32(ty, uimm16 << 16) ) );
}
IRType ty = mode64 ? Ity_I64 : Ity_I32;
Addr64 tgt = 0;
- Int BD = extend_s_16to32(BD_u16);
+ UInt BD = extend_s_16to32(BD_u16);
IRTemp do_branch = newTemp(Ity_I32);
IRTemp ctr_ok = newTemp(Ity_I32);
IRTemp cond_ok = newTemp(Ity_I32);
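
BD still holds a sign-extended displacement; only its carrier type changes, so that it agrees with what the sign-extension helper returns. The helper behaves roughly like the sketch below (paraphrased; treat the exact definition as an assumption about the VEX sources):

   /* Sketch: 16->32 sign extension with a UInt carrier type. Two's-
      complement wraparound keeps later address arithmetic unchanged. */
   static UInt extend_s_16to32 ( UInt x )
   {
      return (UInt)((Int)(x << 16) >> 16);
   }
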
b11to15 are a branch hint, and so we only need to ensure it's
of the form 000XX. */
if ((b11to15 & ~3) != 0) {
- vex_printf("dis_int_branch(ppc)(0x13,b11to15)(%d)\n", (Int)b11to15);
+ vex_printf("dis_int_branch(ppc)(0x13,b11to15)(%d)\n", b11to15);
return False;
}
mkU32( (UInt)simm16 ),
cia );
if (TO == 4) {
- DIP("tweqi r%u,%d\n", (UInt)rA_addr, (Int)simm16);
+ DIP("tweqi r%u,%d\n", rA_addr, (Int)simm16);
} else {
- DIP("tw%di r%u,%d\n", (Int)TO, (UInt)rA_addr, (Int)simm16);
+ DIP("tw%di r%u,%d\n", TO, rA_addr, (Int)simm16);
}
break;
case 0x02: // tdi
return False;
uncond = do_trap( TO, getIReg(rA_addr), mkU64( (ULong)simm16 ), cia );
if (TO == 4) {
- DIP("tdeqi r%u,%d\n", (UInt)rA_addr, (Int)simm16);
+ DIP("tdeqi r%u,%d\n", rA_addr, (Int)simm16);
} else {
- DIP("td%di r%u,%d\n", (Int)TO, (UInt)rA_addr, (Int)simm16);
+ DIP("td%di r%u,%d\n", TO, rA_addr, (Int)simm16);
}
break;
default:
: getIReg(rB_addr),
cia );
if (TO == 4) {
- DIP("tweq r%u,r%u\n", (UInt)rA_addr, (UInt)rB_addr);
+ DIP("tweq r%u,r%u\n", rA_addr, rB_addr);
} else {
- DIP("tw%d r%u,r%u\n", (Int)TO, (UInt)rA_addr, (UInt)rB_addr);
+ DIP("tw%d r%u,r%u\n", TO, rA_addr, rB_addr);
}
break;
case 0x044: // td (Trap Doubleword, PPC64 p534)
return False;
uncond = do_trap( TO, getIReg(rA_addr), getIReg(rB_addr), cia );
if (TO == 4) {
- DIP("tdeq r%u,r%u\n", (UInt)rA_addr, (UInt)rB_addr);
+ DIP("tdeq r%u,r%u\n", rA_addr, rB_addr);
} else {
- DIP("td%d r%u,r%u\n", (Int)TO, (UInt)rA_addr, (UInt)rB_addr);
+ DIP("td%d r%u,r%u\n", TO, rA_addr, rB_addr);
}
break;
default:
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
likely. So we can just ignore it. */
- DIP("lwarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+ DIP("lwarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
// trap if misaligned
gen_SIGBUS_if_misaligned( EA, 4 );
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
likely. So we can just ignore it. */
- DIP("lbarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+ DIP("lbarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
// and actually do the load
res = newTemp(Ity_I8);
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
likely. So we can just ignore it. */
- DIP("lharx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+ DIP("lharx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
// trap if misaligned
gen_SIGBUS_if_misaligned( EA, 2 );
likely. So we can just ignore it. */
if (!mode64)
return False;
- DIP("ldarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+ DIP("ldarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
// trap if misaligned
gen_SIGBUS_if_misaligned( EA, 8 );
in the documentation) is merely a hint bit to the
hardware, I think as to whether or not contention is
likely. So we can just ignore it. */
- DIP("lqarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+ DIP("lqarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, b0);
// trap if misaligned
gen_SIGBUS_if_misaligned( EA, 16 );
stmt( IRStmt_Dirty(d) );
putIReg( rD_addr,
mkWidenFrom32(ty, mkexpr(val), False/*unsigned*/) );
- DIP("mfspr r%u,%u", rD_addr, (UInt)SPR);
+ DIP("mfspr r%u,%u", rD_addr, SPR);
break;
}
stmt( IRStmt_Dirty(d) );
putIReg( rD_addr,
mkWidenFrom32(ty, mkexpr(val), False/*unsigned*/) );
- DIP("mfspr r%u,%u", rD_addr, (UInt)SPR);
+ DIP("mfspr r%u,%u", rD_addr, SPR);
break;
}
UChar rA_addr = ifieldRegA(theInstr);
IRExpr * high64;
IRTemp vS = newTemp( Ity_V128 );
- DIP("mfvsrd r%u,vsr%d\n", rA_addr, (UInt)XS);
+ DIP("mfvsrd r%u,vsr%d\n", rA_addr, XS);
/* XS = SX || S
* For SX=0, mfvsrd is treated as a Floating-Point
UChar rA_addr = ifieldRegA(theInstr);
IRExpr * high64;
IRTemp vS = newTemp( Ity_V128 );
- DIP("mfvsrwz r%u,vsr%d\n", rA_addr, (UInt)XS);
+ DIP("mfvsrwz r%u,vsr%d\n", rA_addr, XS);
/* XS = SX || S
* For SX=0, mfvsrwz is treated as a Floating-Point
* instruction in terms of resource availability.
UChar XT = ifieldRegXT( theInstr );
UChar rA_addr = ifieldRegA(theInstr);
IRTemp rA = newTemp(ty);
- DIP("mtvsrd vsr%d,r%u\n", (UInt)XT, rA_addr);
+ DIP("mtvsrd vsr%d,r%u\n", XT, rA_addr);
/* XS = SX || S
* For SX=0, mfvsrd is treated as a Floating-Point
* instruction in terms of resource availability.
UChar XT = ifieldRegXT( theInstr );
UChar rA_addr = ifieldRegA(theInstr);
IRTemp rA = newTemp( Ity_I32 );
- DIP("mtvsrwa vsr%d,r%u\n", (UInt)XT, rA_addr);
+ DIP("mtvsrwa vsr%d,r%u\n", XT, rA_addr);
/* XS = SX || S
* For SX=0, mtvsrwa is treated as a Floating-Point
* instruction in terms of resource availability.
UChar XT = ifieldRegXT( theInstr );
UChar rA_addr = ifieldRegA(theInstr);
IRTemp rA = newTemp( Ity_I32 );
- DIP("mtvsrwz vsr%d,r%u\n", rA_addr, (UInt)XT);
+ DIP("mtvsrwz vsr%d,r%u\n", rA_addr, XT);
/* XS = SX || S
* For SX=0, mtvsrwz is treated as a Floating-Point
* instruction in terms of resource availability.
if (opc1 != 0x1F || b0 != 0) {
if (0) vex_printf("dis_cache_manage %d %d\n",
- (Int)opc1, (Int)b0);
+ opc1, b0);
vex_printf("dis_cache_manage(ppc)(opc1|b0)\n");
return False;
}
vex_printf("dis_fp_scr(ppc)(instr,mtfsfi)\n");
return False;
}
- DIP("mtfsfi%s crf%d,%d\n", flag_rC ? ".":"", crfD, IMM);
+ DIP("mtfsfi%s crf%u,%d\n", flag_rC ? ".":"", crfD, IMM);
if (GX_level) {
/* This implies that Decimal Floating Point is supported, and the
* FPSCR must be managed as a 64-bit register.
assign( gfield_0_4_shift, mkU8( 31 - 5 ) ); // G-field[0:4]
switch (opc1) {
case 0x3b: // dtstdc, dtstdg
- DIP("dtstd%s %u,r%u,%d\n", opc2 == 0xc2 ? "c" : "g",
+ DIP("dtstd%s %u,r%u,%u\n", opc2 == 0xc2 ? "c" : "g",
crfD, frA_addr, DCM);
/* setup the parameters for the long format of the two instructions */
assign( frAI64_lo, mkU64( 0 ) );
break;
case 0x3F: // dtstdcq, dtstdgq
- DIP("dtstd%sq %u,r%u,%d\n", opc2 == 0xc2 ? "c" : "g",
+ DIP("dtstd%sq %u,r%u,%u\n", opc2 == 0xc2 ? "c" : "g",
crfD, frA_addr, DCM);
/* setup the parameters for the extended format of the
* two instructions
case 0x2B0:
// xscvdpsxds (VSX Scalar truncate Double-Precision to integer and Convert
// to Signed Integer Doubleword format with Saturate)
- DIP("xscvdpsxds v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvdpsxds v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128, binop( Iop_F64toI64S,
mkU32( Irrm_ZERO ),
break;
case 0x0b0: // xscvdpsxws (VSX Scalar truncate Double-Precision to integer and
// Convert to Signed Integer Word format with Saturate)
- DIP("xscvdpsxws v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvdpsxws v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_32Sto64,
break;
case 0x290: // xscvdpuxds (VSX Scalar truncate Double-Precision to integer and Convert
// to Unsigned Integer Doubleword format with Saturate)
- DIP("xscvdpuxds v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvdpuxds v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_F64toI64U,
case 0x270:
// xscvsxdsp (VSX Scalar Convert and round Signed Integer Doubleword
// to Single-Precision format)
- DIP("xscvsxdsp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvsxdsp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
case 0x2F0:
// xscvsxddp (VSX Scalar Convert and round Signed Integer Doubleword to
// Double-Precision format)
- DIP("xscvsxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvsxddp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
binop( Iop_I64StoF64, get_IR_roundingmode(),
case 0x250:
// xscvuxdsp (VSX Scalar Convert and round Unsigned Integer
// Doubleword to Single-Precision format)
- DIP("xscvuxdsp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvuxdsp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
case 0x2D0:
// xscvuxddp (VSX Scalar Convert and round Unsigned Integer Doubleword to
// Double-Precision format)
- DIP("xscvuxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvuxddp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
binop( Iop_I64UtoF64, get_IR_roundingmode(),
IRTemp loResult_32 = newTemp(Ity_I32);
IRExpr* rmZero = mkU32(Irrm_ZERO);
- DIP("xvcvdpsxws v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvdpsxws v%u,v%u\n", XT, XB);
assign(hiResult_32, binop(Iop_F64toI32S, rmZero, mkexpr(xB)));
assign(loResult_32, binop(Iop_F64toI32S, rmZero, mkexpr(xB2)));
putVSReg( XT,
Bool un_signed = (opc2 == 0x110);
IROp op = un_signed ? Iop_QFtoI32Ux4_RZ : Iop_QFtoI32Sx4_RZ;
- DIP("xvcvsp%sxws v%u,v%u\n", un_signed ? "u" : "s", (UInt)XT, (UInt)XB);
+ DIP("xvcvsp%sxws v%u,v%u\n", un_signed ? "u" : "s", XT, XB);
/* The xvcvsp{s|u}xws instruction is similar to vct{s|u}xs, except if src is a NaN,
* then result is set to 0x80000000. */
assign(tempResult, unop(op, getVSReg(XB)));
}
case 0x212: // xscvdpsp (VSX Scalar round Double-Precision to single-precision and
// Convert to Single-Precision format)
- DIP("xscvdpsp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvdpsp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
break;
case 0x216: /* xscvdpspn (VSX Scalar convert scalar Single-Precision to
vector Single-Precision non-signalling) */
- DIP("xscvdpspn v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvdpspn v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
break;
case 0x090: // xscvdpuxws (VSX Scalar truncate Double-Precision to integer
// and Convert to Unsigned Integer Word format with Saturate)
- DIP("xscvdpuxws v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvdpuxws v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
mkU64( 0ULL ) ) );
break;
case 0x292: // xscvspdp (VSX Scalar Convert Single-Precision to Double-Precision format, signaling)
- DIP("xscvspdp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvspdp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
mkU64( 0ULL ) ) );
break;
case 0x296: // xscvspdpn (VSX Scalar Convert Single-Precision to Double-Precision format Non signaling)
- DIP("xscvspdpn v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xscvspdpn v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
break;
case 0x312: // xvcvdpsp (VSX Vector round Double-Precision to single-precision
// and Convert to Single-Precision format)
- DIP("xvcvdpsp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvdpsp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
case 0x390: // xvcvdpuxds (VSX Vector truncate Double-Precision to integer
// and Convert to Unsigned Integer Doubleword format
// with Saturate)
- DIP("xvcvdpuxds v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvdpuxds v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_F64toI64U, mkU32( Irrm_ZERO ), mkexpr( xB ) ),
break;
case 0x190: // xvcvdpuxws (VSX Vector truncate Double-Precision to integer and
// Convert to Unsigned Integer Word format with Saturate)
- DIP("xvcvdpuxws v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvdpuxws v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
mkU32( 0 ) ) ) );
break;
case 0x392: // xvcvspdp (VSX Vector Convert Single-Precision to Double-Precision format)
- DIP("xvcvspdp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvspdp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
break;
case 0x330: // xvcvspsxds (VSX Vector truncate Single-Precision to integer and
// Convert to Signed Integer Doubleword format with Saturate)
- DIP("xvcvspsxds v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvspsxds v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_F64toI64S,
break;
case 0x310: // xvcvspuxds (VSX Vector truncate Single-Precision to integer and
// Convert to Unsigned Integer Doubleword format with Saturate)
- DIP("xvcvspuxds v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvspuxds v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_F64toI64U,
break;
case 0x3B0: // xvcvdpsxds (VSX Vector truncate Double-Precision to integer and
// Convert to Signed Integer Doubleword format with Saturate)
- DIP("xvcvdpsxds v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvdpsxds v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_F64toI64S, mkU32( Irrm_ZERO ), mkexpr( xB ) ),
break;
case 0x3f0: // xvcvsxddp (VSX Vector Convert and round Signed Integer Doubleword
// to Double-Precision format)
- DIP("xvcvsxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvsxddp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
break;
case 0x3d0: // xvcvuxddp (VSX Vector Convert and round Unsigned Integer Doubleword
// to Double-Precision format)
- DIP("xvcvuxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvuxddp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
break;
case 0x370: // xvcvsxdsp (VSX Vector Convert and round Signed Integer Doubleword
// to Single-Precision format)
- DIP("xvcvsxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvsxddp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
break;
case 0x350: // xvcvuxdsp (VSX Vector Convert and round Unsigned Integer Doubleword
// to Single-Precision format)
- DIP("xvcvuxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvuxddp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
break;
case 0x1f0: // xvcvsxwdp (VSX Vector Convert Signed Integer Word to Double-Precision format)
- DIP("xvcvsxwdp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvsxwdp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
unop( Iop_32Sto64, mkexpr( b1 ) ) ) ) ) );
break;
case 0x1d0: // xvcvuxwdp (VSX Vector Convert Unsigned Integer Word to Double-Precision format)
- DIP("xvcvuxwdp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvuxwdp v%u,v%u\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
unop( Iop_32Uto64, mkexpr( b1 ) ) ) ) ) );
break;
case 0x170: // xvcvsxwsp (VSX Vector Convert Signed Integer Word to Single-Precision format)
- DIP("xvcvsxwsp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvsxwsp v%u,v%u\n", XT, XB);
putVSReg( XT, unop( Iop_I32StoFx4, getVSReg( XB ) ) );
break;
case 0x150: // xvcvuxwsp (VSX Vector Convert Unsigned Integer Word to Single-Precision format)
- DIP("xvcvuxwsp v%u,v%u\n", (UInt)XT, (UInt)XB);
+ DIP("xvcvuxwsp v%u,v%u\n", XT, XB);
putVSReg( XT, unop( Iop_I32UtoFx4, getVSReg( XB ) ) );
break;
}
IRTemp hiResult = newTemp(Ity_I64);
IRTemp loResult = newTemp(Ity_I64);
- DIP("xv%sdp v%d,v%d,v%d\n", oper_name, (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xv%sdp v%d,v%d,v%d\n", oper_name, XT, XA, XB);
assign( hiResult,
unop( Iop_ReinterpF64asI64,
{
IRTemp hiResult = newTemp(Ity_I64);
IRTemp loResult = newTemp(Ity_I64);
- DIP("xvsqrtdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xvsqrtdp v%d,v%d\n", XT, XB);
assign( hiResult,
unop( Iop_ReinterpF64asI64,
IRTemp frT = newTemp(Ity_F64);
IRTemp frT2 = newTemp(Ity_F64);
DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name, mdp ? "mdp" : "adp",
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
assign(frT, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XT ) ) ) );
assign(frT2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XT ) ) ) );
IRTemp fe_flagHi, fg_flagHi, fe_flagLo, fg_flagLo;
fe_flagHi = fg_flagHi = fe_flagLo = fg_flagLo = IRTemp_INVALID;
- DIP("xvtsqrtdp cr%d,v%d\n", (UInt)crfD, (UInt)XB);
+ DIP("xvtsqrtdp cr%d,v%d\n", crfD, XB);
assign( frBHi_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
assign( frBLo_I64, unop(Iop_V128to64, getVSReg( XB )) );
do_fp_tsqrt(frBHi_I64, False /*not single precision*/, &fe_flagHi, &fg_flagHi);
IRTemp fe_flagHi, fg_flagHi, fe_flagLo, fg_flagLo;
fe_flagHi = fg_flagHi = fe_flagLo = fg_flagLo = IRTemp_INVALID;
- DIP("xvtdivdp cr%d,v%d,v%d\n", (UInt)crfD, (UInt)XA, (UInt)XB);
+ DIP("xvtdivdp cr%d,v%d,v%d\n", crfD, XA, XB);
assign( frAHi_I64, unop(Iop_V128HIto64, getVSReg( XA )) );
assign( frALo_I64, unop(Iop_V128to64, getVSReg( XA )) );
assign( frBHi_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
switch (opc2) {
case 0x100: // xvaddsp (VSX Vector Add Single-Precision)
- DIP("xvaddsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xvaddsp v%d,v%d,v%d\n", XT, XA, XB);
// WARNING: BOGUS! The backend ignores rm on Iop_Add32Fx4
putVSReg( XT, triop(Iop_Add32Fx4, rm,
getVSReg( XA ), getVSReg( XB )) );
break;
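
The mixed treatment in these VSX hunks is deliberate: UInt arguments must take %u, while UChar fields such as XT/XA/XB merely lose their (UInt) casts and keep %d, since after default promotion GCC's checker accepts a sub-int-width unsigned argument under either conversion. A sketch, assuming GCC's usual -Wformat-signedness behaviour:

   UChar xt = 3;                   /* hypothetical register field */
   vex_printf("v%d\n", xt);        /* ok: promotes to int */
   vex_printf("v%u\n", xt);        /* also ok: value cannot be negative */
   vex_printf("v%d\n", (UInt)xt);  /* warns: %d with an unsigned int */
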
case 0x140: // xvmulsp (VSX Vector Multiply Single-Precision)
- DIP("xvmulsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xvmulsp v%d,v%d,v%d\n", XT, XA, XB);
// WARNING: BOGUS! The backend ignores rm on Iop_Mul32Fx4
putVSReg( XT, triop(Iop_Mul32Fx4, rm,
getVSReg( XA ), getVSReg( XB )) );
break;
case 0x120: // xvsubsp (VSX Vector Subtract Single-Precision)
- DIP("xvsubsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xvsubsp v%d,v%d,v%d\n", XT, XA, XB);
// WARNING: BOGUS! The backend ignores rm on Iop_Sub32Fx4
putVSReg( XT, triop(Iop_Sub32Fx4, rm,
getVSReg( XA ), getVSReg( XB )) );
* I will do the latter, due to the general philosophy of
* reusing existing implementations when practical.
*/
- DIP("xvdivsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xvdivsp v%d,v%d,v%d\n", XT, XA, XB);
breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
}
case 0x116: // xvsqrtsp (VSX Vector Square Root Single-Precision)
{
- DIP("xvsqrtsp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xvsqrtsp v%d,v%d\n", XT, XB);
breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
/* Note: The native xvsqrtsp instruction does not always give the same precision
* as what we get with Iop_SqrtF64. But it doesn't seem worthwhile to implement
negate = False;
}
- DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name, msp ? "msp" : "asp",
- (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name,
+ msp ? "msp" : "asp", XT, XA, XB);
t3 = t2 = t1 = t0 = IRTemp_INVALID;
breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
IRTemp fe_flag2, fg_flag2, fe_flag3, fg_flag3;
fe_flag0 = fg_flag0 = fe_flag1 = fg_flag1 = IRTemp_INVALID;
fe_flag2 = fg_flag2 = fe_flag3 = fg_flag3 = IRTemp_INVALID;
- DIP("xvtsqrtsp cr%d,v%d\n", (UInt)crfD, (UInt)XB);
+ DIP("xvtsqrtsp cr%d,v%d\n", crfD, XB);
breakV128to4x32( getVSReg( XB ), &b3, &b2, &b1, &b0 );
do_fp_tsqrt(b0, True /* single precision*/, &fe_flag0, &fg_flag0);
IRTemp fe_flag2, fg_flag2, fe_flag3, fg_flag3;
fe_flag0 = fg_flag0 = fe_flag1 = fg_flag1 = IRTemp_INVALID;
fe_flag2 = fg_flag2 = fe_flag3 = fg_flag3 = IRTemp_INVALID;
- DIP("xvtdivsp cr%d,v%d,v%d\n", (UInt)crfD, (UInt)XA, (UInt)XB);
+ DIP("xvtdivsp cr%d,v%d,v%d\n", crfD, XA, XB);
breakV128to4x32( getVSReg( XA ), &a3, &a2, &a1, &a0 );
breakV128to4x32( getVSReg( XB ), &b3, &b2, &b1, &b0 );
assign(frB, unop(Iop_V128HIto64, getVSReg( XB )));
assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
- DIP("%s v%d,v%d\n", redp ? "xvredp" : "xvrsqrtedp", (UInt)XT, (UInt)XB);
+ DIP("%s v%d,v%d\n", redp ? "xvredp" : "xvrsqrtedp", XT, XB);
if (!redp) {
assign( sqrtHi,
binop( Iop_SqrtF64,
IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
b3 = b2 = b1 = b0 = IRTemp_INVALID;
- DIP("%s v%d,v%d\n", resp ? "xvresp" : "xvrsqrtesp", (UInt)XT, (UInt)XB);
+ DIP("%s v%d,v%d\n", resp ? "xvresp" : "xvrsqrtesp", XT, XB);
breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
if (!resp) {
a3 = a2 = a1 = a0 = IRTemp_INVALID;
b3 = b2 = b1 = b0 = IRTemp_INVALID;
- DIP("%s v%d,v%d v%d\n", isMin ? "xvminsp" : "xvmaxsp", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("%s v%d,v%d v%d\n", isMin ? "xvminsp" : "xvmaxsp", XT, XA, XB);
breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
assign( a0_I64, unop( Iop_ReinterpF64asI64, mkexpr( a0 ) ) );
assign(frB, unop(Iop_V128HIto64, getVSReg( XB )));
assign(frA2, unop(Iop_V128to64, getVSReg( XA )));
assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
- DIP("%s v%d,v%d v%d\n", isMin ? "xvmindp" : "xvmaxdp", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("%s v%d,v%d v%d\n", isMin ? "xvmindp" : "xvmaxdp", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128, get_max_min_fp(frA, frB, isMin), get_max_min_fp(frA2, frB2, isMin) ) );
break;
assign(frA2, unop(Iop_V128to64, getVSReg( XA )));
assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
- DIP("xvcpsgndp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xvcpsgndp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_Or64,
a3_I64 = a2_I64 = a1_I64 = a0_I64 = IRTemp_INVALID;
b3_I64 = b2_I64 = b1_I64 = b0_I64 = IRTemp_INVALID;
- DIP("xvcpsgnsp v%d,v%d v%d\n",(UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xvcpsgnsp v%d,v%d v%d\n",XT, XA, XB);
breakV128to4x64U( getVSReg( XA ), &a3_I64, &a2_I64, &a1_I64, &a0_I64 );
breakV128to4x64U( getVSReg( XB ), &b3_I64, &b2_I64, &b1_I64, &b0_I64 );
assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg(XB))));
- DIP("xv%sabsdp v%d,v%d\n", make_negative ? "n" : "", (UInt)XT, (UInt)XB);
+ DIP("xv%sabsdp v%d,v%d\n", make_negative ? "n" : "", XT, XB);
if (make_negative) {
assign(abs_resultHi, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr( frB ) ) ) );
assign(abs_resultLo, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr( frB2 ) ) ) );
IRTemp frB2 = newTemp(Ity_F64);
assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg(XB))));
- DIP("xvnegdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xvnegdp v%d,v%d\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
assign( frBLo_I64, unop( Iop_V128to64, getVSReg( XB ) ) );
frD_fp_roundLo = _do_vsx_fp_roundToInt(frBLo_I64, opc2);
- DIP("xvrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), (UInt)XT, (UInt)XB);
+ DIP("xvrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64, frD_fp_roundHi ),
vex_printf("Unrecognized opcode %x\n", opc2);
vpanic("dis_vxv_misc(ppc)(vrspi<x>)(opc2)\n");
}
- DIP("xvrspi%s v%d,v%d\n", insn_suffix, (UInt)XT, (UInt)XB);
+ DIP("xvrspi%s v%d,v%d\n", insn_suffix, XT, XB);
putVSReg( XT, unop( op, getVSReg(XB) ) );
} else {
// Handle xvrspic. Unfortunately there is no corresponding "vfric" instruction.
_do_vsx_fp_roundToInt(b1_I64, opc2));
frD_fp_roundb0 = unop(Iop_TruncF64asF32,
_do_vsx_fp_roundToInt(b0_I64, opc2));
- DIP("xvrspic v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xvrspic v%d,v%d\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
binop( Iop_32HLto64,
*/
switch (opc2) {
case 0x000: // xsaddsp (VSX Scalar Add Single-Precision)
- DIP("xsaddsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsaddsp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
binop( Iop_RoundF64toF32, rm,
mkU64( 0 ) ) );
break;
case 0x020: // xssubsp (VSX Scalar Subtract Single-Precision)
- DIP("xssubsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xssubsp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
binop( Iop_RoundF64toF32, rm,
mkU64( 0 ) ) );
break;
case 0x080: // xsadddp (VSX scalar add double-precision)
- DIP("xsadddp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsadddp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
triop( Iop_AddF64, rm,
mkexpr( frA ),
mkU64( 0 ) ) );
break;
case 0x060: // xsdivsp (VSX scalar divide single-precision)
- DIP("xsdivsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsdivsp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
binop( Iop_RoundF64toF32, rm,
mkU64( 0 ) ) );
break;
case 0x0E0: // xsdivdp (VSX scalar divide double-precision)
- DIP("xsdivdp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsdivdp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
triop( Iop_DivF64, rm,
mkexpr( frA ),
{
IRTemp frT = newTemp(Ity_F64);
Bool mdp = opc2 == 0x024;
- DIP("xsmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
putVSReg( XT,
{
IRTemp frT = newTemp(Ity_F64);
Bool mdp = opc2 == 0x0A4;
- DIP("xsmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
{
IRTemp frT = newTemp(Ity_F64);
Bool mdp = opc2 == 0x064;
- DIP("xsmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
putVSReg( XT,
{
IRTemp frT = newTemp(Ity_F64);
Bool mdp = opc2 == 0x0E4;
- DIP("xsmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
IRTemp frT = newTemp(Ity_F64);
IRTemp maddResult = newTemp(Ity_I64);
- DIP("xsnmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsnmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
assign( maddResult, unop( Iop_ReinterpF64asI64, qop( Iop_MAddF64, rm,
IRTemp frT = newTemp(Ity_F64);
IRTemp maddResult = newTemp(Ity_I64);
- DIP("xsnmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsnmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
assign( maddResult,
Bool mdp = opc2 == 0x264;
IRTemp msubResult = newTemp(Ity_I64);
- DIP("xsnmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsnmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
assign( msubResult,
Bool mdp = opc2 == 0x2E4;
IRTemp msubResult = newTemp(Ity_I64);
- DIP("xsnmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsnmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", XT, XA, XB);
assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
getVSReg( XT ) ) ) );
assign(msubResult, unop( Iop_ReinterpF64asI64,
}
case 0x040: // xsmulsp (VSX Scalar Multiply Single-Precision)
- DIP("xsmulsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsmulsp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
binop( Iop_RoundF64toF32, rm,
break;
case 0x0C0: // xsmuldp (VSX Scalar Multiply Double-Precision)
- DIP("xsmuldp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xsmuldp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
triop( Iop_MulF64, rm,
mkexpr( frA ),
mkU64( 0 ) ) );
break;
case 0x0A0: // xssubdp (VSX Scalar Subtract Double-Precision)
- DIP("xssubdp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xssubdp v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
triop( Iop_SubF64, rm,
mkexpr( frA ),
break;
case 0x016: // xssqrtsp (VSX Scalar Square Root Single-Precision)
- DIP("xssqrtsp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xssqrtsp v%d,v%d\n", XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64,
break;
case 0x096: // xssqrtdp (VSX Scalar Square Root Double-Precision)
- DIP("xssqrtdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xssqrtdp v%d,v%d\n", XT, XB);
putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
binop( Iop_SqrtF64, rm,
mkexpr( frB ) ) ),
UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) );
IRTemp frA_I64 = newTemp(Ity_I64);
IRTemp frB_I64 = newTemp(Ity_I64);
- DIP("xstdivdp crf%d,v%d,v%d\n", crfD, (UInt)XA, (UInt)XB);
+ DIP("xstdivdp crf%d,v%d,v%d\n", crfD, XA, XB);
assign( frA_I64, unop( Iop_ReinterpF64asI64, mkexpr( frA ) ) );
assign( frB_I64, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );
putGST_field( PPC_GST_CR, do_fp_tdiv(frA_I64, frB_I64), crfD );
IRTemp flags = newTemp(Ity_I32);
IRTemp fe_flag, fg_flag;
fe_flag = fg_flag = IRTemp_INVALID;
- DIP("xstsqrtdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xstsqrtdp v%d,v%d\n", XT, XB);
assign( frB_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
do_fp_tsqrt(frB_I64, False /*not single precision*/, &fe_flag, &fg_flag);
/* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
/* Note: Differences between xscmpudp and xscmpodp are only in
* exception flag settings, which aren't supported anyway. */
DIP("xscmp%sdp crf%d,fr%u,fr%u\n", opc2 == 0x08c ? "u" : "o",
- crfD, (UInt)XA, (UInt)XB);
+ crfD, XA, XB);
ccPPC32 = get_fp_cmp_CR_val( binop(Iop_CmpF64, mkexpr(frA), mkexpr(frB)));
putGST_field( PPC_GST_CR, mkexpr(ccPPC32), crfD );
break;
case 0x18C: case 0x38C: // xvcmpeqdp[.] (VSX Vector Compare Equal To Double-Precision [ & Record ])
{
DIP("xvcmpeqdp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_EQ);
break;
}
case 0x1CC: case 0x3CC: // xvcmpgedp[.] (VSX Vector Compare Greater Than or Equal To Double-Precision [ & Record ])
{
DIP("xvcmpgedp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_GE);
break;
}
case 0x1AC: case 0x3AC: // xvcmpgtdp[.] (VSX Vector Compare Greater Than Double-Precision [ & Record ])
{
DIP("xvcmpgtdp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_GT);
break;
}
IRTemp vD = newTemp(Ity_V128);
DIP("xvcmpeqsp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
assign( vD, binop(Iop_CmpEQ32Fx4, mkexpr(vA), mkexpr(vB)) );
putVSReg( XT, mkexpr(vD) );
if (flag_rC) {
IRTemp vD = newTemp(Ity_V128);
DIP("xvcmpgesp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
assign( vD, binop(Iop_CmpGE32Fx4, mkexpr(vA), mkexpr(vB)) );
putVSReg( XT, mkexpr(vD) );
if (flag_rC) {
IRTemp vD = newTemp(Ity_V128);
DIP("xvcmpgtsp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
- (UInt)XT, (UInt)XA, (UInt)XB);
+ XT, XA, XB);
assign( vD, binop(Iop_CmpGT32Fx4, mkexpr(vA), mkexpr(vB)) );
putVSReg( XT, mkexpr(vD) );
if (flag_rC) {
binop(Iop_ShlV128, mkexpr(vB),
mkU8(1)), mkU8(1)));
}
- DIP("xsabsdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xsabsdp v%d,v%d\n", XT, XB);
putVSReg(XT, mkexpr(absVal));
break;
}
IRTemp vecA_signed = newTemp(Ity_I64);
IRTemp vecB_unsigned = newTemp(Ity_I64);
IRTemp vec_result = newTemp(Ity_V128);
- DIP("xscpsgndp v%d,v%d v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xscpsgndp v%d,v%d v%d\n", XT, XA, XB);
assign( vecA_signed, binop( Iop_And64,
unop( Iop_V128HIto64,
mkexpr(vA)),
{
/* Scalar negative absolute value double-precision */
IRTemp BHi_signed = newTemp(Ity_I64);
- DIP("xsnabsdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xsnabsdp v%d,v%d\n", XT, XB);
assign( BHi_signed, binop( Iop_Or64,
unop( Iop_V128HIto64,
mkexpr(vB) ),
IRTemp BHi_negated = newTemp(Ity_I64);
IRTemp BHi_negated_signbit = newTemp(Ity_I1);
IRTemp vec_result = newTemp(Ity_V128);
- DIP("xsnabsdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xsnabsdp v%d,v%d\n", XT, XB);
assign( BHi_signed, unop( Iop_V128HIto64, mkexpr(vB) ) );
assign( BHi_unsigned, binop( Iop_And64, mkexpr(BHi_signed),
mkU64(VG_PPC_SIGN_MASK) ) );
IRTemp frA = newTemp(Ity_I64);
IRTemp frB = newTemp(Ity_I64);
Bool isMin = opc2 == 0x2A0 ? True : False;
- DIP("%s v%d,v%d v%d\n", isMin ? "xsmaxdp" : "xsmindp", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("%s v%d,v%d v%d\n", isMin ? "xsmaxdp" : "xsmindp", XT, XA, XB);
assign(frA, unop(Iop_V128HIto64, mkexpr( vA )));
assign(frB, unop(Iop_V128HIto64, mkexpr( vB )));
assign(frB_I64, unop(Iop_V128HIto64, mkexpr( vB )));
frD_fp_round = _do_vsx_fp_roundToInt(frB_I64, opc2);
- DIP("xsrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), (UInt)XT, (UInt)XB);
+ DIP("xsrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), XT, XB);
putVSReg( XT,
binop( Iop_64HLtoV128,
unop( Iop_ReinterpF64asI64, frD_fp_round),
IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
IRExpr* rm = get_IR_roundingmode();
Bool redp = opc2 == 0x034;
- DIP("%s v%d,v%d\n", redp ? "xsresp" : "xsrsqrtesp", (UInt)XT,
- (UInt)XB);
+ DIP("%s v%d,v%d\n", redp ? "xsresp" : "xsrsqrtesp", XT,
+ XB);
assign( frB,
unop( Iop_ReinterpI64asF64,
IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
IRExpr* rm = get_IR_roundingmode();
Bool redp = opc2 == 0x0B4;
- DIP("%s v%d,v%d\n", redp ? "xsredp" : "xsrsqrtedp", (UInt)XT, (UInt)XB);
+ DIP("%s v%d,v%d\n", redp ? "xsredp" : "xsrsqrtedp", XT, XB);
assign( frB,
unop( Iop_ReinterpI64asF64,
unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
{
IRTemp frB = newTemp(Ity_F64);
IRExpr* rm = get_IR_roundingmode();
- DIP("xsrsp v%d, v%d\n", (UInt)XT, (UInt)XB);
+ DIP("xsrsp v%d, v%d\n", XT, XB);
assign( frB,
unop( Iop_ReinterpI64asF64,
unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
switch (opc2) {
case 0x268: // xxlxor
- DIP("xxlxor v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxlxor v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_XorV128, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x248: // xxlor
- DIP("xxlor v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxlor v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_OrV128, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x288: // xxlnor
- DIP("xxlnor v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxlnor v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, unop( Iop_NotV128, binop( Iop_OrV128, mkexpr( vA ),
mkexpr( vB ) ) ) );
break;
case 0x208: // xxland
- DIP("xxland v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxland v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_AndV128, mkexpr( vA ), mkexpr( vB ) ) );
break;
case 0x228: //xxlandc
- DIP("xxlandc v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxlandc v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_AndV128, mkexpr( vA ), unop( Iop_NotV128,
mkexpr( vB ) ) ) );
break;
case 0x2A8: // xxlorc (VSX Logical OR with complement)
- DIP("xxlorc v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxlorc v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, binop( Iop_OrV128,
mkexpr( vA ),
unop( Iop_NotV128, mkexpr( vB ) ) ) );
break;
case 0x2C8: // xxlnand (VSX Logical NAND)
- DIP("xxlnand v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxlnand v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, unop( Iop_NotV128,
binop( Iop_AndV128, mkexpr( vA ),
mkexpr( vB ) ) ) );
break;
case 0x2E8: // xxleqv (VSX Logical Equivalence)
- DIP("xxleqv v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxleqv v%d,v%d,v%d\n", XT, XA, XB);
putVSReg( XT, unop( Iop_NotV128,
binop( Iop_XorV128,
mkexpr( vA ), mkexpr( vB ) ) ) );
case 0x00C: // lxsiwzx (Load VSX Scalar as Integer Word and Zero Indexed)
{
IRExpr * exp;
- DIP("lxsiwzx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxsiwzx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
if (host_endness == VexEndnessLE)
exp = unop( Iop_64to32, load( Ity_I64, mkexpr( EA ) ) );
case 0x04C: // lxsiwax (Load VSX Scalar as Integer Word Algebraic Indexed)
{
IRExpr * exp;
- DIP("lxsiwax %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxsiwax %d,r%u,r%u\n", XT, rA_addr, rB_addr);
if (host_endness == VexEndnessLE)
exp = unop( Iop_64to32, load( Ity_I64, mkexpr( EA ) ) );
case 0x20C: // lxsspx (Load VSX Scalar Single-Precision Indexed)
{
IRExpr * exp;
- DIP("lxsspx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxsspx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
/* Take 32-bit floating point value in the upper half of the fetched
* 64-bit value, convert to 64-bit floating point value and load into
* top word of V128.
case 0x24C: // lxsdx
{
IRExpr * exp;
- DIP("lxsdx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxsdx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
exp = load( Ity_I64, mkexpr( EA ) );
// We need to pass an expression of type Ity_V128 with putVSReg, but the load
// we just performed is only a DW. But since the contents of VSR[XT] element 1
IRExpr * high, *low;
ULong ea_off = 8;
IRExpr* high_addr;
- DIP("lxvd2x %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxvd2x %d,r%u,r%u\n", XT, rA_addr, rB_addr);
high = load( Ity_I64, mkexpr( EA ) );
high_addr = binop( addOp, mkexpr( EA ), ty == Ity_I64 ? mkU64( ea_off )
: mkU32( ea_off ) );
case 0x14C: // lxvdsx
{
IRTemp data = newTemp(Ity_I64);
- DIP("lxvdsx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxvdsx %d,r%u,r%u\n", XT, rA_addr, rB_addr);
assign( data, load( Ity_I64, mkexpr( EA ) ) );
putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( data ), mkexpr( data ) ) );
break;
{
IRExpr *t0;
- DIP("lxvw4x %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+ DIP("lxvw4x %d,r%u,r%u\n", XT, rA_addr, rB_addr);
/* The load will result in the data being in BE order. */
if (host_endness == VexEndnessLE) {
* the 128-bit vector.
*/
IRExpr * high64, * low32;
- DIP("stxsiwx %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+ DIP("stxsiwx %d,r%u,r%u\n", XS, rA_addr, rB_addr);
high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
low32 = unop( Iop_64to32, high64 );
store( mkexpr( EA ), low32 );
{
IRTemp high64 = newTemp(Ity_F64);
IRTemp val32 = newTemp(Ity_I32);
- DIP("stxsspx %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+ DIP("stxsspx %d,r%u,r%u\n", XS, rA_addr, rB_addr);
assign(high64, unop( Iop_ReinterpI64asF64,
unop( Iop_V128HIto64, mkexpr( vS ) ) ) );
assign(val32, unop( Iop_ReinterpF32asI32,
case 0x2CC:
{
IRExpr * high64;
- DIP("stxsdx %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+ DIP("stxsdx %d,r%u,r%u\n", XS, rA_addr, rB_addr);
high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
store( mkexpr( EA ), high64 );
break;
case 0x3CC:
{
IRExpr * high64, *low64;
- DIP("stxvd2x %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+ DIP("stxvd2x %d,r%u,r%u\n", XS, rA_addr, rB_addr);
high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
low64 = unop( Iop_V128to64, mkexpr( vS ) );
store( mkexpr( EA ), high64 );
IRTemp hi64 = newTemp( Ity_I64 );
IRTemp lo64 = newTemp( Ity_I64 );
- DIP("stxvw4x %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+ DIP("stxvw4x %d,r%u,r%u\n", XS, rA_addr, rB_addr);
// This instruction supports word-aligned stores, so EA may not be
// quad-word aligned. Therefore, do 4 individual word-size stores.
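
The four-store decomposition mentioned in that comment looks roughly like this in IR-building form (a sketch for the 32-bit-address case with hypothetical word expressions w0..w3, not the literal VEX code):

   /* Sketch: split a quad-word store into four word stores at EA+0..EA+12,
      so only word alignment of EA is required. */
   store( mkexpr(EA), w0 );
   store( binop(Iop_Add32, mkexpr(EA), mkU32(4)),  w1 );
   store( binop(Iop_Add32, mkexpr(EA), mkU32(8)),  w2 );
   store( binop(Iop_Add32, mkexpr(EA), mkU32(12)), w3 );
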
assign ( result, binop(Iop_OrV128, mkexpr(hi), mkexpr(lo)) );
} else
assign ( result, mkexpr(vA) );
- DIP("xxsldwi v%d,v%d,v%d,%d\n", (UInt)XT, (UInt)XA, (UInt)XB, (UInt)SHW);
+ DIP("xxsldwi v%d,v%d,v%d,%d\n", XT, XA, XB, SHW);
putVSReg( XT, mkexpr(result) );
break;
}
assign( vT, binop(Iop_64HLtoV128, mkexpr(hi), mkexpr(lo)) );
- DIP("xxpermdi v%d,v%d,v%d,0x%x\n", (UInt)XT, (UInt)XA, (UInt)XB, (UInt)DM);
+ DIP("xxpermdi v%d,v%d,v%d,0x%x\n", XT, XA, XB, DM);
putVSReg( XT, mkexpr( vT ) );
break;
}
binop(Iop_32HLto64, mkexpr(ahi32), mkexpr(bhi32)),
binop(Iop_32HLto64, mkexpr(alo32), mkexpr(blo32))) );
- DIP("xxmrg%cw v%d,v%d,v%d\n", type, (UInt)XT, (UInt)XA, (UInt)XB);
+ DIP("xxmrg%cw v%d,v%d,v%d\n", type, XT, XA, XB);
putVSReg( XT, mkexpr( vT ) );
break;
}
UChar XC = ifieldRegXC(theInstr);
IRTemp vC = newTemp( Ity_V128 );
assign( vC, getVSReg( XC ) );
- DIP("xxsel v%d,v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB, (UInt)XC);
+ DIP("xxsel v%d,v%d,v%d,v%d\n", XT, XA, XB, XC);
/* vD = (vA & ~vC) | (vB & vC) */
putVSReg( XT, binop(Iop_OrV128,
binop(Iop_AndV128, mkexpr(vA), unop(Iop_NotV128, mkexpr(vC))),
{
UChar UIM = ifieldRegA(theInstr) & 3;
UChar sh_uim = (3 - (UIM)) * 32;
- DIP("xxspltw v%d,v%d,%d\n", (UInt)XT, (UInt)XB, UIM);
+ DIP("xxspltw v%d,v%d,%d\n", XT, XB, UIM);
putVSReg( XT,
unop( Iop_Dup32x4,
unop( Iop_V128to32,
as the reason for failing the transaction */
UInt tm_exact = 1; /* have exact address for failure */
- DIP("tbegin. %d\n", R);
+ DIP("tbegin. %u\n", R);
/* Set the CR0 field to indicate the tbegin failed. Then let
* the code do the branch to the failure path.
/* The tend. is just a noop. Do nothing */
UInt A = IFIELD( theInstr, 25, 1 );
- DIP("tend. %d\n", A);
+ DIP("tend. %u\n", A);
break;
}
/* The tsr. is just a noop. Do nothing */
UInt L = IFIELD( theInstr, 21, 1 );
- DIP("tsr. %d\n", L);
+ DIP("tsr. %u\n", L);
break;
}
/* The tcheck. is just a noop. Do nothing */
UInt BF = IFIELD( theInstr, 25, 1 );
- DIP("tcheck. %d\n", BF);
+ DIP("tcheck. %u\n", BF);
break;
}
UInt RA = IFIELD( theInstr, 16, 5 );
UInt RB = IFIELD( theInstr, 11, 5 );
- DIP("tabortwc. %d,%d,%d\n", TO, RA, RB);
+ DIP("tabortwc. %u,%u,%u\n", TO, RA, RB);
break;
}
UInt RA = IFIELD( theInstr, 16, 5 );
UInt RB = IFIELD( theInstr, 11, 5 );
- DIP("tabortdc. %d,%d,%d\n", TO, RA, RB);
+ DIP("tabortdc. %u,%u,%u\n", TO, RA, RB);
break;
}
UInt RA = IFIELD( theInstr, 16, 5 );
UInt SI = IFIELD( theInstr, 11, 5 );
- DIP("tabortwci. %d,%d,%d\n", TO, RA, SI);
+ DIP("tabortwci. %u,%u,%u\n", TO, RA, SI);
break;
}
UInt RA = IFIELD( theInstr, 16, 5 );
UInt SI = IFIELD( theInstr, 11, 5 );
- DIP("tabortdci. %d,%d,%d\n", TO, RA, SI);
+ DIP("tabortdci. %u,%u,%u\n", TO, RA, SI);
break;
}
/* The tabort. is just a noop. Do nothing */
UInt RA = IFIELD( theInstr, 16, 5 );
- DIP("tabort. %d\n", RA);
+ DIP("tabort. %u\n", RA);
break;
}
/* The treclaim. is just a noop. Do nothing */
UInt RA = IFIELD( theInstr, 16, 5 );
- DIP("treclaim. %d\n", RA);
+ DIP("treclaim. %u\n", RA);
break;
}
IRType ty = Ity_I64;
if(!(iregNo < 56 || iregNo == 63 ||
(iregNo >= 70 && iregNo <= 73))) {
- vex_printf("iregNo=%d\n", iregNo);
+ vex_printf("iregNo=%u\n", iregNo);
vassert(0);
}
return IRExpr_Get(integerGuestRegOffset(iregNo), ty);
IRType ty = Ity_I64;
if(!(archreg < 56 || archreg == 63 || archreg == 70 ||
archreg == 72 || archreg == 73)) {
- vex_printf("archreg=%d\n", archreg);
+ vex_printf("archreg=%u\n", archreg);
vassert(0);
}
vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
/* ! 14 */ case 0x15: case 0x16: case 0x17:
{ UChar rm = toUChar(mod_reg_rm & 7);
UInt d = getUDisp32(delta);
- DIS(buf, "%s0x%x(%s)", sorbTxt(sorb), (Int)d, nameIReg(4,rm));
+ DIS(buf, "%s0x%x(%s)", sorbTxt(sorb), d, nameIReg(4,rm));
*len = 5;
return disAMode_copy2tmp(
handleSegOverride(sorb,
void fp_do_op_ST_ST ( const HChar* op_txt, IROp op, UInt st_src, UInt st_dst,
Bool pop_after )
{
- DIP("f%s%s st(%d), st(%d)\n", op_txt, pop_after?"p":"",
- (Int)st_src, (Int)st_dst );
+ DIP("f%s%s st(%u), st(%u)\n", op_txt, pop_after?"p":"",
+ st_src, st_dst);
put_ST_UNCHECKED(
st_dst,
triop( op,
void fp_do_oprev_ST_ST ( const HChar* op_txt, IROp op, UInt st_src,
UInt st_dst, Bool pop_after )
{
- DIP("f%s%s st(%d), st(%d)\n", op_txt, pop_after?"p":"",
- (Int)st_src, (Int)st_dst );
+ DIP("f%s%s st(%u), st(%u)\n", op_txt, pop_after?"p":"",
+ st_src, st_dst);
put_ST_UNCHECKED(
st_dst,
triop( op,
/* %eflags(Z,P,C) = UCOMI( st(0), st(i) ) */
static void fp_do_ucomi_ST0_STi ( UInt i, Bool pop_after )
{
- DIP("fucomi%s %%st(0),%%st(%d)\n", pop_after ? "p" : "", (Int)i );
+ DIP("fucomi%s %%st(0),%%st(%u)\n", pop_after ? "p" : "", i);
/* This is a bit of a hack (and isn't really right). It sets
Z,P,C,O correctly, but forces A and S to zero, whereas the Intel
documentation implies A and S are unchanged.
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xD8\n");
goto decode_fail;
}
/* Dunno if this is right */
case 0xD0 ... 0xD7: /* FCOM %st(?),%st(0) */
r_dst = (UInt)modrm - 0xD0;
- DIP("fcom %%st(0),%%st(%d)\n", (Int)r_dst);
+ DIP("fcom %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
binop( Iop_And32,
/* Dunno if this is right */
case 0xD8 ... 0xDF: /* FCOMP %st(?),%st(0) */
r_dst = (UInt)modrm - 0xD8;
- DIP("fcomp %%st(0),%%st(%d)\n", (Int)r_dst);
+ DIP("fcomp %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
binop( Iop_And32,
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xD9\n");
goto decode_fail;
}
case 0xC0 ... 0xC7: /* FLD %st(?) */
r_src = (UInt)modrm - 0xC0;
- DIP("fld %%st(%d)\n", (Int)r_src);
+ DIP("fld %%st(%u)\n", r_src);
t1 = newTemp(Ity_F64);
assign(t1, get_ST(r_src));
fp_push();
case 0xC8 ... 0xCF: /* FXCH %st(?) */
r_src = (UInt)modrm - 0xC8;
- DIP("fxch %%st(%d)\n", (Int)r_src);
+ DIP("fxch %%st(%u)\n", r_src);
t1 = newTemp(Ity_F64);
t2 = newTemp(Ity_F64);
assign(t1, get_ST(0));
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xDA\n");
goto decode_fail;
}
case 0xC0 ... 0xC7: /* FCMOVB ST(i), ST(0) */
r_src = (UInt)modrm - 0xC0;
- DIP("fcmovb %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovb %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondB),
case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */
r_src = (UInt)modrm - 0xC8;
- DIP("fcmovz %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovz %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondZ),
case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */
r_src = (UInt)modrm - 0xD0;
- DIP("fcmovbe %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovbe %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondBE),
case 0xD8 ... 0xDF: /* FCMOVU ST(i), ST(0) */
r_src = (UInt)modrm - 0xD8;
- DIP("fcmovu %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovu %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondP),
}
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xDB\n");
goto decode_fail;
}
case 0xC0 ... 0xC7: /* FCMOVNB ST(i), ST(0) */
r_src = (UInt)modrm - 0xC0;
- DIP("fcmovnb %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovnb %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNB),
case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
r_src = (UInt)modrm - 0xC8;
- DIP("fcmovnz %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovnz %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNZ),
case 0xD0 ... 0xD7: /* FCMOVNBE ST(i), ST(0) */
r_src = (UInt)modrm - 0xD0;
- DIP("fcmovnbe %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovnbe %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNBE),
case 0xD8 ... 0xDF: /* FCMOVNU ST(i), ST(0) */
r_src = (UInt)modrm - 0xD8;
- DIP("fcmovnu %%st(%d), %%st(0)\n", (Int)r_src);
+ DIP("fcmovnu %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNP),
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xDC\n");
goto decode_fail;
}
}
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xDD\n");
goto decode_fail;
}
case 0xC0 ... 0xC7: /* FFREE %st(?) */
r_dst = (UInt)modrm - 0xC0;
- DIP("ffree %%st(%d)\n", (Int)r_dst);
+ DIP("ffree %%st(%u)\n", r_dst);
put_ST_TAG ( r_dst, mkU8(0) );
break;
case 0xD0 ... 0xD7: /* FST %st(0),%st(?) */
r_dst = (UInt)modrm - 0xD0;
- DIP("fst %%st(0),%%st(%d)\n", (Int)r_dst);
+ DIP("fst %%st(0),%%st(%u)\n", r_dst);
/* P4 manual says: "If the destination operand is a
non-empty register, the invalid-operation exception
is not generated." Hence put_ST_UNCHECKED. */
case 0xD8 ... 0xDF: /* FSTP %st(0),%st(?) */
r_dst = (UInt)modrm - 0xD8;
- DIP("fstp %%st(0),%%st(%d)\n", (Int)r_dst);
+ DIP("fstp %%st(0),%%st(%u)\n", r_dst);
/* P4 manual says: "If the destination operand is a
non-empty register, the invalid-operation exception
is not generated." Hence put_ST_UNCHECKED. */
case 0xE0 ... 0xE7: /* FUCOM %st(0),%st(?) */
r_dst = (UInt)modrm - 0xE0;
- DIP("fucom %%st(0),%%st(%d)\n", (Int)r_dst);
+ DIP("fucom %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
binop( Iop_And32,
case 0xE8 ... 0xEF: /* FUCOMP %st(0),%st(?) */
r_dst = (UInt)modrm - 0xE8;
- DIP("fucomp %%st(0),%%st(%d)\n", (Int)r_dst);
+ DIP("fucomp %%st(0),%%st(%u)\n", r_dst);
/* This forces C1 to zero, which isn't right. */
put_C3210(
binop( Iop_And32,
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xDE\n");
goto decode_fail;
}
break;
default:
- vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+ vex_printf("unhandled opc_aux = 0x%2x\n", (UInt)gregOfRM(modrm));
vex_printf("first_opcode == 0xDF\n");
goto decode_fail;
}
case 0xFB: op = Iop_Sub64; break;
default:
- vex_printf("\n0x%x\n", (Int)opc);
+ vex_printf("\n0x%x\n", opc);
vpanic("dis_MMXop_regmem_to_reg");
}
getXMMReg(eregOfRM(rm))) );
delta += 2;
DIP("%s $%d,%s,%s\n", opname,
- (Int)imm8,
+ imm8,
nameXMMReg(eregOfRM(rm)),
nameXMMReg(gregOfRM(rm)) );
} else {
);
delta += alen+1;
DIP("%s $%d,%s,%s\n", opname,
- (Int)imm8,
+ imm8,
dis_buf,
nameXMMReg(gregOfRM(rm)) );
}
assign(t4, getIReg(2, eregOfRM(modrm)));
delta += 3+1;
lane = insn[3+1-1];
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
nameIReg(2,eregOfRM(modrm)),
nameMMXReg(gregOfRM(modrm)));
} else {
delta += 3+alen;
lane = insn[3+alen-1];
assign(t4, loadLE(Ity_I16, mkexpr(addr)));
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
dis_buf,
nameMMXReg(gregOfRM(modrm)));
}
assign(t4, getIReg(2, eregOfRM(modrm)));
delta += 3+1;
lane = insn[3+1-1];
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
nameIReg(2,eregOfRM(modrm)),
nameXMMReg(gregOfRM(modrm)));
} else {
delta += 3+alen;
lane = insn[3+alen-1];
assign(t4, loadLE(Ity_I16, mkexpr(addr)));
- DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+ DIP("pinsrw $%d,%s,%s\n", lane,
dis_buf,
nameXMMReg(gregOfRM(modrm)));
}
assign( sV, getMMXReg(eregOfRM(modrm)) );
d32 = (UInt)insn[3+1];
delta += 3+1+1;
- DIP("palignr $%d,%s,%s\n", (Int)d32,
+ DIP("palignr $%u,%s,%s\n", d32,
nameMMXReg(eregOfRM(modrm)),
nameMMXReg(gregOfRM(modrm)));
} else {
assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
d32 = (UInt)insn[3+alen];
delta += 3+alen+1;
- DIP("palignr $%d%s,%s\n", (Int)d32,
+ DIP("palignr $%u%s,%s\n", d32,
dis_buf,
nameMMXReg(gregOfRM(modrm)));
}
assign( sV, getXMMReg(eregOfRM(modrm)) );
d32 = (UInt)insn[3+1];
delta += 3+1+1;
- DIP("palignr $%d,%s,%s\n", (Int)d32,
+ DIP("palignr $%u,%s,%s\n", d32,
nameXMMReg(eregOfRM(modrm)),
nameXMMReg(gregOfRM(modrm)));
} else {
assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
d32 = (UInt)insn[3+alen];
delta += 3+alen+1;
- DIP("palignr $%d,%s,%s\n", (Int)d32,
+ DIP("palignr $%u,%s,%s\n", d32,
dis_buf,
nameXMMReg(gregOfRM(modrm)));
}
d32 = getUDisp16(delta);
delta += 2;
dis_ret(&dres, d32);
- DIP("ret %d\n", (Int)d32);
+ DIP("ret %u\n", d32);
break;
case 0xC3: /* RET */
dis_ret(&dres, 0);
if (d32 >= 0x3F && d32 <= 0x4F) {
jmp_lit(&dres, Ijk_SigSEGV, ((Addr32)guest_EIP_bbstart)+delta-2);
vassert(dres.whatNext == Dis_StopHere);
- DIP("int $0x%x\n", (Int)d32);
+ DIP("int $0x%x\n", d32);
break;
}
mkU32(guest_EIP_curr_instr) ) );
jmp_lit(&dres, jump_kind, ((Addr32)guest_EIP_bbstart)+delta);
vassert(dres.whatNext == Dis_StopHere);
- DIP("int $0x%x\n", (Int)d32);
+ DIP("int $0x%x\n", d32);
break;
/* ------------------------ Jcond, byte offset --------- */
t1 = newTemp(Ity_I32);
abyte = getIByte(delta); delta++;
assign(t1, mkU32( abyte & 0xFF ));
- DIP("in%c $%d,%s\n", nameISize(sz), (Int)abyte, nameIReg(sz,R_EAX));
+ DIP("in%c $%d,%s\n", nameISize(sz), abyte, nameIReg(sz,R_EAX));
goto do_IN;
case 0xE5: /* IN imm8, eAX */
vassert(sz == 2 || sz == 4);
t1 = newTemp(Ity_I32);
abyte = getIByte(delta); delta++;
assign(t1, mkU32( abyte & 0xFF ));
- DIP("in%c $%d,%s\n", nameISize(sz), (Int)abyte, nameIReg(sz,R_EAX));
+ DIP("in%c $%d,%s\n", nameISize(sz), abyte, nameIReg(sz,R_EAX));
goto do_IN;
case 0xEC: /* IN %DX, AL */
sz = 1;
t1 = newTemp(Ity_I32);
abyte = getIByte(delta); delta++;
assign( t1, mkU32( abyte & 0xFF ) );
- DIP("out%c %s,$%d\n", nameISize(sz), nameIReg(sz,R_EAX), (Int)abyte);
+ DIP("out%c %s,$%d\n", nameISize(sz), nameIReg(sz,R_EAX), abyte);
goto do_OUT;
case 0xE7: /* OUT eAX, imm8 */
vassert(sz == 2 || sz == 4);
t1 = newTemp(Ity_I32);
abyte = getIByte(delta); delta++;
assign( t1, mkU32( abyte & 0xFF ) );
- DIP("out%c %s,$%d\n", nameISize(sz), nameIReg(sz,R_EAX), (Int)abyte);
+ DIP("out%c %s,$%d\n", nameISize(sz), nameIReg(sz,R_EAX), abyte);
goto do_OUT;
case 0xEE: /* OUT AL, %DX */
sz = 1;
if (sigill_diag) {
vex_printf("vex x86->IR: unhandled instruction bytes: "
"0x%x 0x%x 0x%x 0x%x\n",
- (Int)getIByte(delta_start+0),
- (Int)getIByte(delta_start+1),
- (Int)getIByte(delta_start+2),
- (Int)getIByte(delta_start+3) );
+ getIByte(delta_start+0),
+ getIByte(delta_start+1),
+ getIByte(delta_start+2),
+ getIByte(delta_start+3));
}
/* Tell the dispatcher that this insn cannot be decoded, and so has
ppHRegAMD64(i->Ain.SseCMov.dst);
return;
case Ain_SseShuf:
- vex_printf("pshufd $0x%x,", i->Ain.SseShuf.order);
+ vex_printf("pshufd $0x%x,", (UInt)i->Ain.SseShuf.order);
ppHRegAMD64(i->Ain.SseShuf.src);
vex_printf(",");
ppHRegAMD64(i->Ain.SseShuf.dst);
{
ppHRegARM(p->reg);
if (p->tag == ARMNRS_Scalar) {
- vex_printf("[%d]", p->index);
+ vex_printf("[%u]", p->index);
}
}
i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF) {
- vex_printf(", #%d", i->ARMin.NUnary.size);
+ vex_printf(", #%u", i->ARMin.NUnary.size);
}
if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
UInt size;
size = i->ARMin.NUnary.size;
if (size & 0x40) {
- vex_printf(", #%d", size - 64);
+ vex_printf(", #%u", size - 64);
} else if (size & 0x20) {
- vex_printf(", #%d", size - 32);
+ vex_printf(", #%u", size - 32);
} else if (size & 0x10) {
- vex_printf(", #%d", size - 16);
+ vex_printf(", #%u", size - 16);
} else if (size & 0x08) {
- vex_printf(", #%d", size - 8);
+ vex_printf(", #%u", size - 8);
}
}
return;
vex_printf(", ");
ppHRegARM(i->ARMin.Add32.rN);
vex_printf(", ");
- vex_printf("%d", i->ARMin.Add32.imm32);
+ vex_printf("%u", i->ARMin.Add32.imm32);
return;
case ARMin_EvCheck:
vex_printf("(evCheck) ldr r12,");
ret = "c.ngt.d";
break;
default:
- vex_printf("Unknown op: %d", op);
+ vex_printf("Unknown op: %d", (Int)op);
vpanic("showMIPSFpOp");
break;
}
UInt func)
{
if (rs >= 0x20)
- vex_printf("rs = %d\n", rs);
+ vex_printf("rs = %u\n", rs);
UInt theInstr;
vassert(opc < 0x40);
vassert(rs < 0x20);
break;
default:
vex_printf("ERROR: emit_PPCInstr quad default case %d \n",
- i->Pin.DfpShift128.op);
+ (Int)i->Pin.DfpShift128.op);
goto bad;
}
}
default:
vex_printf( "ERROR: iselDfp64Expr_wrk, UNKNOWN unop case %d\n",
- e->Iex.Unop.op );
+ (Int)e->Iex.Unop.op );
}
}
}
default:
vex_printf( "ERROR: iselDfp128Expr_wrk, UNKNOWN binop case %d\n",
- e->Iex.Binop.op );
+ (Int)e->Iex.Binop.op );
break;
}
}
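The MIPS and PPC hunks cast enum-typed operands to (Int) instead of
switching to %u. Assuming GCC's documented rule that an enum with no
negative enumerators gets the underlying type unsigned int, %d on a raw
enum value could still warn, while %u could warn under a compiler that
picks int; the cast pins the type down either way. A sketch with a
hypothetical op set:

#include <stdio.h>

typedef enum { Fop_Add = 0, Fop_Sub, Fop_NgtD } FpOp;  /* hypothetical */

int main(void)
{
   FpOp op = Fop_NgtD;
   /* The cast makes the promoted type unambiguously int, so %d is
      correct regardless of which underlying type the compiler chose. */
   printf("Unknown op: %d\n", (int)op);
   return 0;
}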
if (hregIsVirtual(reg)) {
buf[0] = '\0';
switch (hregClass(reg)) {
- case HRcInt64: vex_sprintf(buf, "%%vR%d", r); break;
- case HRcFlt64: vex_sprintf(buf, "%%vF%d", r); break;
+ case HRcInt64: vex_sprintf(buf, "%%vR%u", r); break;
+ case HRcFlt64: vex_sprintf(buf, "%%vF%u", r); break;
default: goto fail;
}
return buf;
continue;
case 'G': /* %G = guest state @ offset */
- p += vex_sprintf(p, "guest[%d]", va_arg(args, UInt));
+ p += vex_sprintf(p, "guest[%u]", va_arg(args, UInt));
continue;
case 'C': /* %C = condition code */
for (i = 0; i < num_args; ++i) {
if (i != 0) p += vex_sprintf(p, ", ");
- p += vex_sprintf(p, "r%d", s390_gprno_from_arg_index(i));
+ p += vex_sprintf(p, "r%u", s390_gprno_from_arg_index(i));
}
continue;
}
IRType type = typeOfIRExpr(env->type_env, args[i]);
if (type != Ity_I64) {
++arg_errors;
- vex_printf("calling %s: argument #%d has type ", callee->name, i);
+ vex_printf("calling %s: argument #%u has type ", callee->name, i);
ppIRType(type);
vex_printf("; Ity_I64 is required\n");
}
if (op->type == TILEGX_OP_TYPE_REGISTER)
vex_printf("r%d", (Int) decoded[i].operand_values[n]);
else
- vex_printf("%ld", (unsigned long)decoded[i].operand_values[n]);
+ vex_printf("%llu", (ULong)decoded[i].operand_values[n]);
if (n != (decoded[i].opcode->num_operands - 1))
vex_printf(", ");
}
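The tilegx hunk is the one genuine width bug in this set: %ld expects a
signed long, the argument was unsigned long, and, assuming operand_values
holds 64-bit values as the new (ULong) cast indicates, long is only 32 bits
wide on ILP32 hosts. Casting to ULong and printing with %llu matches both
width and signedness everywhere. A reduced sketch:

#include <stdio.h>

int main(void)
{
   unsigned long long val = 0x123456789ULL;  /* stands in for
                                                operand_values[n] */
   /* %llu always means unsigned long long, so this prints all 64 bits
      correctly even on hosts where long is 32 bits wide. */
   printf("%llu\n", val);
   return 0;
}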
if (nVECRETs || nBBPTRs)
- vex_printf("nVECRETs=%d, nBBPTRs=%d\n",
+ vex_printf("nVECRETs=%u, nBBPTRs=%u\n",
nVECRETs, nBBPTRs);
if (TILEGX_N_REGPARMS < n_args) {
ppHRegX86(i->Xin.SseCMov.dst);
return;
case Xin_SseShuf:
- vex_printf("pshufd $0x%x,", i->Xin.SseShuf.order);
+ vex_printf("pshufd $0x%x,", (UInt)i->Xin.SseShuf.order);
ppHRegX86(i->Xin.SseShuf.src);
vex_printf(",");
ppHRegX86(i->Xin.SseShuf.dst);
case Ity_D128: vex_printf( "D128"); break;
case Ity_V128: vex_printf( "V128"); break;
case Ity_V256: vex_printf( "V256"); break;
- default: vex_printf("ty = 0x%x\n", (Int)ty);
+ default: vex_printf("ty = 0x%x\n", (UInt)ty);
vpanic("ppIRType");
}
}
if (tmp == IRTemp_INVALID)
vex_printf("IRTemp_INVALID");
else
- vex_printf( "t%d", (Int)tmp);
+ vex_printf( "t%u", tmp);
}
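None of these warnings would fire at all unless vex_printf (and the DIP
macro built on it) were declared with GCC's format attribute, which makes
the compiler type-check variadic arguments against the format string just
as it does for printf. A minimal stand-in (my_vex_printf is hypothetical,
not the VEX declaration):

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static void my_vex_printf(const char *fmt, ...)  /* hypothetical stand-in */
{
   va_list ap;
   va_start(ap, fmt);
   vprintf(fmt, ap);           /* forward to the host printf engine */
   va_end(ap);
}

int main(void)
{
   unsigned int tmp = 7;
   my_vex_printf("t%u\n", tmp);   /* checked at compile time: %u matches */
   return 0;
}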
void ppIROp ( IROp op )