ULong arg, ULong rot_amt, ULong rflags_in, Long sz
);
+extern ULong amd64g_calculate_RCL (
+ ULong arg, ULong rot_amt, ULong rflags_in, Long sz
+ );
+
extern ULong amd64g_check_fldcw ( ULong fpucw );
extern ULong amd64g_create_fpucw ( ULong fpround );
return wantRflags ? rflags_in : arg;
}
+/* Software implementation of the amd64 RCL (rotate through carry,
+   left) instruction.  A helper call can only return 64 bits, but RCL
+   produces both a rotated value and new flags, so the caller encodes
+   which result it wants in the sign of szIN: szIN > 0 means "return
+   the rotated value", szIN < 0 means "return the updated rflags".
+   |szIN| is the operand size in bytes (1, 2, 4 or 8).  Only the C and
+   O bits of rflags_in are recomputed; all other bits pass through. */
+ULong amd64g_calculate_RCL ( ULong arg,
+ ULong rot_amt,
+ ULong rflags_in,
+ Long szIN )
+{
+ Bool wantRflags = toBool(szIN < 0);
+ ULong sz = wantRflags ? (-szIN) : szIN;
+ /* The raw rotate count is masked to 6 bits for 64-bit operands and
+    to 5 bits for narrower operands. */
+ ULong tempCOUNT = rot_amt & (sz == 8 ? 0x3F : 0x1F);
+ ULong cf=0, of=0, tempcf;
+
+ switch (sz) {
+ case 8:
+ /* Rotate one bit at a time: the old top bit becomes the new CF,
+    and the old CF is shifted in at the bottom. */
+ cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
+ while (tempCOUNT > 0) {
+ tempcf = (arg >> 63) & 1;
+ arg = (arg << 1) | (cf & 1);
+ cf = tempcf;
+ tempCOUNT--;
+ }
+ /* OF is the XOR of the new top bit and the new CF. */
+ of = ((arg >> 63) ^ cf) & 1;
+ break;
+ case 4:
+ /* Rotating an N-bit value through CF cycles with period N+1, so
+    reduce the (already masked) count mod 33. */
+ while (tempCOUNT >= 33) tempCOUNT -= 33;
+ cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
+ while (tempCOUNT > 0) {
+ tempcf = (arg >> 31) & 1;
+ arg = 0xFFFFFFFFULL & ((arg << 1) | (cf & 1));
+ cf = tempcf;
+ tempCOUNT--;
+ }
+ of = ((arg >> 31) ^ cf) & 1;
+ break;
+ case 2:
+ /* Period of a 16-bit rotate through CF is 17. */
+ while (tempCOUNT >= 17) tempCOUNT -= 17;
+ cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
+ while (tempCOUNT > 0) {
+ tempcf = (arg >> 15) & 1;
+ arg = 0xFFFFULL & ((arg << 1) | (cf & 1));
+ cf = tempcf;
+ tempCOUNT--;
+ }
+ of = ((arg >> 15) ^ cf) & 1;
+ break;
+ case 1:
+ /* Period of an 8-bit rotate through CF is 9. */
+ while (tempCOUNT >= 9) tempCOUNT -= 9;
+ cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
+ while (tempCOUNT > 0) {
+ tempcf = (arg >> 7) & 1;
+ arg = 0xFFULL & ((arg << 1) | (cf & 1));
+ cf = tempcf;
+ tempCOUNT--;
+ }
+ of = ((arg >> 7) ^ cf) & 1;
+ break;
+ default:
+ vpanic("calculate_RCL(amd64g): invalid size");
+ }
+
+ /* Fold the freshly computed C and O bits back into the incoming
+    rflags, leaving all other flag bits untouched. */
+ cf &= 1;
+ of &= 1;
+ rflags_in &= ~(AMD64G_CC_MASK_C | AMD64G_CC_MASK_O);
+ rflags_in |= (cf << AMD64G_CC_SHIFT_C) | (of << AMD64G_CC_SHIFT_O);
+
+ return wantRflags ? rflags_in : arg;
+}
+
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* delta on entry points at the modrm byte. */
HChar dis_buf[50];
Int len;
- Bool isShift, isRotate, isRotateRC;
+ Bool isShift, isRotate, isRotateC;
IRType ty = szToITy(sz);
IRTemp dst0 = newTemp(ty);
IRTemp dst1 = newTemp(ty);
isRotate = False;
switch (gregLO3ofRM(modrm)) { case 0: case 1: isRotate = True; }
- isRotateRC = toBool(gregLO3ofRM(modrm) == 3);
+ isRotateC = False;
+ switch (gregLO3ofRM(modrm)) { case 2: case 3: isRotateC = True; }
- if (!isShift && !isRotate && !isRotateRC) {
+ if (!isShift && !isRotate && !isRotateC) {
vex_printf("\ncase %d\n", gregLO3ofRM(modrm));
vpanic("dis_Grp2(Reg): unhandled case(amd64)");
}
- if (isRotateRC) {
+ if (isRotateC) {
/* Call a helper; this insn is so ridiculous it does not deserve
better. One problem is, the helper has to calculate both the
              new value and the new flags.  This is more than 64 bits,
              and there is no way to return that much from one helper
              call, so the helper is invoked twice — once for each
              result — using the sign of the sz field to indicate
              whether it is the value or rflags result we want.
*/
+ Bool left = toBool(gregLO3ofRM(modrm) == 2);
IRExpr** argsVALUE;
IRExpr** argsRFLAGS;
mkIRExprCCall(
Ity_I64,
0/*regparm*/,
- "amd64g_calculate_RCR", &amd64g_calculate_RCR,
+ left ? "amd64g_calculate_RCL" : "amd64g_calculate_RCR",
+ left ? &amd64g_calculate_RCL : &amd64g_calculate_RCR,
argsVALUE
)
);
mkIRExprCCall(
Ity_I64,
0/*regparm*/,
- "amd64g_calculate_RCR", &amd64g_calculate_RCR,
+ left ? "amd64g_calculate_RCL" : "amd64g_calculate_RCR",
+ left ? &amd64g_calculate_RCL : &amd64g_calculate_RCR,
argsRFLAGS
)
);