From: Julian Seward
Date: Wed, 11 May 2005 15:37:50 +0000 (+0000)
Subject: AMD64 backend cleanup: get rid of instruction variants which the insn
X-Git-Tag: svn/VALGRIND_3_0_1^2~149
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=84c153b8a8e2bc7a22995caccef228d5553b1937;p=thirdparty%2Fvalgrind.git

AMD64 backend cleanup: get rid of instruction variants which the insn
selector doesn't generate.


git-svn-id: svn://svn.valgrind.org/vex/trunk@1185
---

diff --git a/VEX/priv/host-amd64/hdefs.c b/VEX/priv/host-amd64/hdefs.c
index 0c6ed743db..769d83e317 100644
--- a/VEX/priv/host-amd64/hdefs.c
+++ b/VEX/priv/host-amd64/hdefs.c
@@ -681,7 +681,7 @@ AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst )
    vassert(op != Aalu_MUL);
    return i;
 }
-AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, AMD64RM* dst ) {
+AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
    AMD64Instr* i   = LibVEX_Alloc(sizeof(AMD64Instr));
    i->tag          = Ain_Sh64;
    i->Ain.Sh64.op  = op;
@@ -689,27 +689,25 @@ AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, AMD64RM* dst ) {
    i->Ain.Sh64.dst = dst;
    return i;
 }
-AMD64Instr* AMD64Instr_Test64 ( AMD64RI* src, AMD64RM* dst ) {
-   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));
-   i->tag              = Ain_Test64;
-   i->Ain.Test64.src   = src;
-   i->Ain.Test64.dst   = dst;
+AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));
+   i->tag              = Ain_Test64;
+   i->Ain.Test64.imm32 = imm32;
+   i->Ain.Test64.dst   = dst;
    return i;
 }
-AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, AMD64RM* dst ) {
+AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
    AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
    i->tag             = Ain_Unary64;
    i->Ain.Unary64.op  = op;
    i->Ain.Unary64.dst = dst;
    return i;
 }
-AMD64Instr* AMD64Instr_MulL ( Bool syned, Int sz, AMD64RM* src ) {
+AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
    AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));
    i->tag            = Ain_MulL;
    i->Ain.MulL.syned = syned;
-   i->Ain.MulL.sz    = sz;
    i->Ain.MulL.src   = src;
-   vassert(sz == 2 || sz == 4 || sz == 8);
    return i;
 }
 AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
@@ -1066,22 +1064,18 @@ void ppAMD64Instr ( AMD64Instr* i )
            vex_printf("%%cl,");
         else
            vex_printf("$%d,", (Int)i->Ain.Sh64.src);
-        ppAMD64RM(i->Ain.Sh64.dst);
+        ppHRegAMD64(i->Ain.Sh64.dst);
         return;
      case Ain_Test64:
-        vex_printf("testq ");
-        ppAMD64RI(i->Ain.Test64.src);
-        vex_printf(",");
-        ppAMD64RM(i->Ain.Test64.dst);
+        vex_printf("testq $%d,", (Int)i->Ain.Test64.imm32);
+        ppHRegAMD64(i->Ain.Test64.dst);
         return;
      case Ain_Unary64:
         vex_printf("%sq ", showAMD64UnaryOp(i->Ain.Unary64.op));
-        ppAMD64RM(i->Ain.Unary64.dst);
+        ppHRegAMD64(i->Ain.Unary64.dst);
         return;
      case Ain_MulL:
-        vex_printf("%cmul%s ",
-                   i->Ain.MulL.syned ? 's' : 'u',
-                   showAMD64ScalarSz(i->Ain.MulL.sz));
+        vex_printf("%cmulq ", i->Ain.MulL.syned ? 's' : 'u');
         ppAMD64RM(i->Ain.MulL.src);
         return;
      case Ain_Div:
@@ -1386,16 +1380,15 @@ void getRegUsage_AMD64Instr ( HRegUsage* u, AMD64Instr* i )
         addRegUsage_AMD64AMode(u, i->Ain.Alu64M.dst);
         return;
      case Ain_Sh64:
-        addRegUsage_AMD64RM(u, i->Ain.Sh64.dst, HRmModify);
+        addHRegUse(u, HRmModify, i->Ain.Sh64.dst);
         if (i->Ain.Sh64.src == 0)
            addHRegUse(u, HRmRead, hregAMD64_RCX());
         return;
      case Ain_Test64:
-        addRegUsage_AMD64RI(u, i->Ain.Test64.src);
-        addRegUsage_AMD64RM(u, i->Ain.Test64.dst, HRmRead);
+        addHRegUse(u, HRmRead, i->Ain.Test64.dst);
         return;
      case Ain_Unary64:
-        addRegUsage_AMD64RM(u, i->Ain.Unary64.dst, HRmModify);
+        addHRegUse(u, HRmModify, i->Ain.Unary64.dst);
         return;
      case Ain_MulL:
         addRegUsage_AMD64RM(u, i->Ain.MulL.src, HRmRead);
@@ -1657,14 +1650,13 @@ void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i )
         mapRegs_AMD64AMode(m, i->Ain.Alu64M.dst);
         return;
      case Ain_Sh64:
-        mapRegs_AMD64RM(m, i->Ain.Sh64.dst);
+        mapReg(m, &i->Ain.Sh64.dst);
         return;
      case Ain_Test64:
-        mapRegs_AMD64RI(m, i->Ain.Test64.src);
-        mapRegs_AMD64RM(m, i->Ain.Test64.dst);
+        mapReg(m, &i->Ain.Test64.dst);
         return;
      case Ain_Unary64:
-        mapRegs_AMD64RM(m, i->Ain.Unary64.dst);
+        mapReg(m, &i->Ain.Unary64.dst);
         return;
      case Ain_MulL:
         mapRegs_AMD64RM(m, i->Ain.MulL.src);
@@ -1827,14 +1819,7 @@ Bool isMove_AMD64Instr ( AMD64Instr* i, HReg* src, HReg* dst )
       *dst = i->Ain.Alu64R.dst;
       return True;
    }
-//..    /* Moves between FP regs */
-//..    if (i->tag == Xin_FpUnary) {
-//..       if (i->Xin.FpUnary.op != Xfp_MOV)
-//..          return False;
-//..       *src = i->Xin.FpUnary.src;
-//..       *dst = i->Xin.FpUnary.dst;
-//..       return True;
-//..    }
+   /* Moves between vector regs */
    if (i->tag == Ain_SseReRg) {
       if (i->Ain.SseReRg.op != Asse_MOV)
          return False;
@@ -1860,8 +1845,6 @@ AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offsetB )
    switch (hregClass(rreg)) {
       case HRcInt64:
          return AMD64Instr_Alu64M ( Aalu_MOV, AMD64RI_Reg(rreg), am );
-      //case HRcFlt64:
-      //   return AMD64Instr_FpLdSt ( False/*store*/, 8, rreg, am );
       case HRcVec128:
          return AMD64Instr_SseLdSt ( False/*store*/, 16, rreg, am );
       default:
@@ -1879,8 +1862,6 @@ AMD64Instr* genReload_AMD64 ( HReg rreg, Int offsetB )
    switch (hregClass(rreg)) {
       case HRcInt64:
          return AMD64Instr_Alu64R ( Aalu_MOV, AMD64RMI_Mem(am), rreg );
-      //case HRcFlt64:
-      //   return AMD64Instr_FpLdSt ( True/*load*/, 8, rreg, am );
       case HRcVec128:
          return AMD64Instr_SseLdSt ( True/*load*/, 16, rreg, am );
       default:
@@ -1925,19 +1906,6 @@ static UChar iregBits3210 ( HReg r )
    return toUChar(n);
 }
-
-
-//.. static UInt fregNo ( HReg r )
-//.. {
-//..    UInt n;
-//..    vassert(hregClass(r) == HRcFlt64);
-//..    vassert(!hregIsVirtual(r));
-//..    n = hregNumber(r);
-//..    vassert(n <= 5);
-//..    return n;
-//.. }
-
-
 
 /* Given an xmm (128bit V-class) register number, produce the
    equivalent numbered register in 64-bit I-class.  This is a bit of
   fakery which facilitates using functions that work on integer
@@ -2455,88 +2423,61 @@ Int emit_AMD64Instr ( UChar* buf, Int nbuf, AMD64Instr* i )
         default: goto bad;
      }
      if (i->Ain.Sh64.src == 0) {
-        *p++ = rexAMode_R(fake(0),
-                          i->Ain.Sh64.dst->Arm.Reg.reg);
+        *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst);
         *p++ = toUChar(opc_cl);
-        switch (i->Ain.Sh64.dst->tag) {
-           case Arm_Reg:
-              p = doAMode_R(p, fake(subopc),
-                            i->Ain.Sh64.dst->Arm.Reg.reg);
-              goto done;
-           default:
-              goto bad;
-        }
+        p = doAMode_R(p, fake(subopc), i->Ain.Sh64.dst);
+        goto done;
      } else {
-        *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst->Arm.Reg.reg);
+        *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst);
         *p++ = toUChar(opc_imm);
-        switch (i->Ain.Sh64.dst->tag) {
-           case Arm_Reg:
-              p = doAMode_R(p, fake(subopc),
-                            i->Ain.Sh64.dst->Arm.Reg.reg);
-              *p++ = (UChar)(i->Ain.Sh64.src);
-              goto done;
-           default:
-              goto bad;
-        }
+        p = doAMode_R(p, fake(subopc), i->Ain.Sh64.dst);
+        *p++ = (UChar)(i->Ain.Sh64.src);
+        goto done;
      }
      break;
 
   case Ain_Test64:
-     if (i->Ain.Test64.src->tag == Ari_Imm
-         && i->Ain.Test64.dst->tag == Arm_Reg) {
-        /* testq sign-extend($imm32), %reg */
-        *p++ = rexAMode_R(fake(0), i->Ain.Test64.dst->Arm.Reg.reg);
-        *p++ = 0xF7;
-        p = doAMode_R(p, fake(0), i->Ain.Test64.dst->Arm.Reg.reg);
-        p = emit32(p, i->Ain.Test64.src->Ari.Imm.imm32);
-        goto done;
-     }
-     break;
+     /* testq sign-extend($imm32), %reg */
+     *p++ = rexAMode_R(fake(0), i->Ain.Test64.dst);
+     *p++ = 0xF7;
+     p = doAMode_R(p, fake(0), i->Ain.Test64.dst);
+     p = emit32(p, i->Ain.Test64.imm32);
+     goto done;
 
   case Ain_Unary64:
      if (i->Ain.Unary64.op == Aun_NOT) {
-        if (i->Ain.Unary64.dst->tag == Arm_Reg) {
-           *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst->Arm.Reg.reg);
-           *p++ = 0xF7;
-           p = doAMode_R(p, fake(2), i->Ain.Unary64.dst->Arm.Reg.reg);
-           goto done;
-        } else {
-           goto bad;
-        }
+        *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst);
+        *p++ = 0xF7;
+        p = doAMode_R(p, fake(2), i->Ain.Unary64.dst);
+        goto done;
      }
      if (i->Ain.Unary64.op == Aun_NEG) {
-        if (i->Ain.Unary64.dst->tag == Arm_Reg) {
-           *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst->Arm.Reg.reg);
-           *p++ = 0xF7;
-           p = doAMode_R(p, fake(3), i->Ain.Unary64.dst->Arm.Reg.reg);
-           goto done;
-        } else {
-           goto bad;
-        }
+        *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst);
+        *p++ = 0xF7;
+        p = doAMode_R(p, fake(3), i->Ain.Unary64.dst);
+        goto done;
      }
      break;
 
   case Ain_MulL:
      subopc = i->Ain.MulL.syned ? 5 : 4;
-     if (i->Ain.MulL.sz == 8) {
-        switch (i->Ain.MulL.src->tag) {
-           case Arm_Mem:
-              *p++ = rexAMode_M( fake(0),
-                                 i->Ain.MulL.src->Arm.Mem.am);
-              *p++ = 0xF7;
-              p = doAMode_M(p, fake(subopc),
-                            i->Ain.MulL.src->Arm.Mem.am);
-              goto done;
-           case Arm_Reg:
-              *p++ = rexAMode_R(fake(0),
-                                i->Ain.MulL.src->Arm.Reg.reg);
-              *p++ = 0xF7;
-              p = doAMode_R(p, fake(subopc),
-                            i->Ain.MulL.src->Arm.Reg.reg);
-              goto done;
-           default:
-              goto bad;
-        }
+     switch (i->Ain.MulL.src->tag) {
+        case Arm_Mem:
+           *p++ = rexAMode_M( fake(0),
+                              i->Ain.MulL.src->Arm.Mem.am);
+           *p++ = 0xF7;
+           p = doAMode_M(p, fake(subopc),
+                         i->Ain.MulL.src->Arm.Mem.am);
+           goto done;
+        case Arm_Reg:
+           *p++ = rexAMode_R(fake(0),
+                             i->Ain.MulL.src->Arm.Reg.reg);
+           *p++ = 0xF7;
+           p = doAMode_R(p, fake(subopc),
+                         i->Ain.MulL.src->Arm.Reg.reg);
+           goto done;
+        default:
+           goto bad;
      }
      break;
 
diff --git a/VEX/priv/host-amd64/hdefs.h b/VEX/priv/host-amd64/hdefs.h
index b1e40bc9b1..9e5d0e21c4 100644
--- a/VEX/priv/host-amd64/hdefs.h
+++ b/VEX/priv/host-amd64/hdefs.h
@@ -429,22 +429,21 @@ typedef
          struct {
             AMD64ShiftOp op;
             UInt         src;  /* shift amount, or 0 means %cl */
-            AMD64RM*     dst;
+            HReg         dst;
          } Sh64;
          struct {
-            AMD64RI* src;
-            AMD64RM* dst;
+            UInt imm32;
+            HReg dst;
          } Test64;
         /* Not and Neg */
         struct {
            AMD64UnaryOp op;
-           AMD64RM*     dst;
+           HReg         dst;
         } Unary64;
-        /* DX:AX = AX *s/u r/m16, or EDX:EAX = EAX *s/u r/m32,
-           or RDX:RAX = RAX *s/u r/m64 */
+        /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
+           r/m64 */
         struct {
            Bool     syned;
-           Int      sz; /* 2, 4 or 8 only */
           AMD64RM* src;
        } MulL;
        /* amd64 div/idiv instruction.  Modifies RDX and RAX and
@@ -651,10 +650,10 @@ typedef
 extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
 extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
 extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*, AMD64AMode* );
-extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, AMD64RM* dst );
-extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, AMD64RM* );
-extern AMD64Instr* AMD64Instr_Test64     ( AMD64RI* src, AMD64RM* dst );
-extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, Int sz, AMD64RM* );
+extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
+extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
+extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
+extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
 extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
//.. extern AMD64Instr* AMD64Instr_Sh3232    ( AMD64ShiftOp, UInt amt, HReg src, HReg dst );
 extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
diff --git a/VEX/priv/host-amd64/isel.c b/VEX/priv/host-amd64/isel.c
index 1bc7d684cf..63eebda65c 100644
--- a/VEX/priv/host-amd64/isel.c
+++ b/VEX/priv/host-amd64/isel.c
@@ -659,7 +659,7 @@ void set_SSE_rounding_mode ( ISelEnv* env, IRExpr* mode )
    addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Imm(3), reg));
    addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
                                    iselIntExpr_RMI(env, mode), reg));
-   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, AMD64RM_Reg(reg)));
+   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, reg));
    addInstr(env, AMD64Instr_Alu64R(
                     Aalu_OR, AMD64RMI_Imm(DEFAULT_MXCSR), reg));
    addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(reg)));
@@ -689,7 +689,7 @@ void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode )
    */
    addInstr(env, mk_iMOVsd_RR(rrm, rrm2));
    addInstr(env, AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(3), rrm2));
-   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, AMD64RM_Reg(rrm2)));
+   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, rrm2));
    addInstr(env, AMD64Instr_Alu64R(Aalu_OR,
                                    AMD64RMI_Imm(DEFAULT_FPUCW), rrm2));
    addInstr(env, AMD64Instr_Alu64M(Aalu_MOV,
@@ -893,8 +893,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
//..             addInstr(env, X86Instr_Sh32(Xsh_SAR, 16, X86RM_Reg(dst)));
//..             break;
             case Iop_Sar32:
-               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(dst)));
-               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, AMD64RM_Reg(dst)));
+               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, dst));
+               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, dst));
                break;
             default:
               ppIROp(e->Iex.Binop.op);
@@ -911,15 +911,12 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
            vassert(nshift >= 0);
            if (nshift > 0)
               /* Can't allow nshift==0 since that means %cl */
-              addInstr(env, AMD64Instr_Sh64(
-                               shOp,
-                               nshift,
-                               AMD64RM_Reg(dst)));
+              addInstr(env, AMD64Instr_Sh64(shOp, nshift, dst));
         } else {
            /* General case; we have to force the amount into %cl. */
            HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, mk_iMOVsd_RR(regR,hregAMD64_RCX()));
-           addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, AMD64RM_Reg(dst)));
+           addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, dst));
         }
         return dst;
      }
@@ -1076,11 +1073,11 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg left64 = iselIntExpr_R(env, e->Iex.Binop.arg1);
         addInstr(env, mk_iMOVsd_RR(left64, rdx));
         addInstr(env, mk_iMOVsd_RR(left64, rax));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, AMD64RM_Reg(rdx)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, rdx));
         addInstr(env, AMD64Instr_Div(syned, 4, rmRight));
         addInstr(env, AMD64Instr_MovZLQ(rdx,rdx));
         addInstr(env, AMD64Instr_MovZLQ(rax,rax));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(rdx)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, rdx));
         addInstr(env, mk_iMOVsd_RR(rax, dst));
         addInstr(env, AMD64Instr_Alu64R(Aalu_OR, AMD64RMI_Reg(rdx), dst));
         return dst;
@@ -1093,7 +1090,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg lo32s = iselIntExpr_R(env, e->Iex.Binop.arg2);
         addInstr(env, mk_iMOVsd_RR(hi32s, hi32));
         addInstr(env, mk_iMOVsd_RR(lo32s, lo32));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(hi32)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, hi32));
         addInstr(env, AMD64Instr_MovZLQ(lo32,lo32));
         addInstr(env, AMD64Instr_Alu64R(
                          Aalu_OR, AMD64RMI_Reg(lo32), hi32));
@@ -1107,7 +1104,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
         addInstr(env, mk_iMOVsd_RR(hi16s, hi16));
         addInstr(env, mk_iMOVsd_RR(lo16s, lo16));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, AMD64RM_Reg(hi16)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, hi16));
         addInstr(env, AMD64Instr_Alu64R(
                          Aalu_AND, AMD64RMI_Imm(0xFFFF), lo16));
         addInstr(env, AMD64Instr_Alu64R(
@@ -1122,7 +1119,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
         addInstr(env, mk_iMOVsd_RR(hi8s, hi8));
         addInstr(env, mk_iMOVsd_RR(lo8s, lo8));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, AMD64RM_Reg(hi8)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, hi8));
         addInstr(env, AMD64Instr_Alu64R(
                          Aalu_AND, AMD64RMI_Imm(0xFF), lo8));
         addInstr(env, AMD64Instr_Alu64R(
@@ -1154,10 +1151,10 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
 
         addInstr(env, mk_iMOVsd_RR(a32s, a32));
         addInstr(env, mk_iMOVsd_RR(b32s, b32));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, AMD64RM_Reg(a32)));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, AMD64RM_Reg(b32)));
-        addInstr(env, AMD64Instr_Sh64(shr_op, shift, AMD64RM_Reg(a32)));
-        addInstr(env, AMD64Instr_Sh64(shr_op, shift, AMD64RM_Reg(b32)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, a32));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, b32));
+        addInstr(env, AMD64Instr_Sh64(shr_op, shift, a32));
+        addInstr(env, AMD64Instr_Sh64(shr_op, shift, b32));
         addInstr(env, AMD64Instr_Alu64R(Aalu_MUL, AMD64RMI_Reg(a32), b32));
         return b32;
      }
@@ -1259,8 +1256,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg dst = newVRegI(env);
         HReg src = iselIntExpr_R(env, expr8);
         addInstr(env, mk_iMOVsd_RR(src,dst) );
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, AMD64RM_Reg(dst)));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, dst));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, dst));
         return dst;
      }
 
@@ -1302,8 +1299,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
         UInt amt = 32;
         addInstr(env, mk_iMOVsd_RR(src,dst) );
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, AMD64RM_Reg(dst)));
-        addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+        addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
         return dst;
      }
      case Iop_128HIto64: {
@@ -1342,8 +1339,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
                             || e->Iex.Unop.op==Iop_16Sto64 );
         UInt amt = srcIs16 ? 48 : 56;
         addInstr(env, mk_iMOVsd_RR(src,dst) );
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, AMD64RM_Reg(dst)));
-        addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+        addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
         return dst;
      }
      case Iop_Not8:
@@ -1353,7 +1350,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg dst = newVRegI(env);
         HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, mk_iMOVsd_RR(src,dst) );
-        addInstr(env, AMD64Instr_Unary64(Aun_NOT,AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Unary64(Aun_NOT,dst));
         return dst;
      }
//..       case Iop_64HIto32: {
@@ -1378,8 +1375,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
            default: vassert(0);
         }
         addInstr(env, mk_iMOVsd_RR(src,dst) );
-        addInstr(env, AMD64Instr_Sh64(
-                         Ash_SHR, shift, AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHR, shift, dst));
         return dst;
      }
      case Iop_1Uto64:
@@ -1398,8 +1394,8 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg dst = newVRegI(env);
         AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
         addInstr(env, AMD64Instr_Set64(cond,dst));
-        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, AMD64RM_Reg(dst)));
-        addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, dst));
+        addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst));
         return dst;
      }
      case Iop_Ctz64: {
@@ -1430,7 +1426,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
         HReg dst = newVRegI(env);
         HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, mk_iMOVsd_RR(reg,dst));
-        addInstr(env, AMD64Instr_Unary64(Aun_NEG,AMD64RM_Reg(dst)));
+        addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst));
         return dst;
      }
 
@@ -1597,7 +1593,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
      HReg dst = newVRegI(env);
      addInstr(env, mk_iMOVsd_RR(rX,dst));
      r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
-     addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+     addInstr(env, AMD64Instr_Test64(0xFF, r8));
      addInstr(env, AMD64Instr_CMov64(Acc_Z,r0,dst));
      return dst;
   }
@@ -1958,8 +1954,8 @@ static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
    /* 64to1 */
    if (e->tag == Iex_Unop
        && e->Iex.Unop.op == Iop_64to1) {
-      AMD64RM* rm = iselIntExpr_RM(env, e->Iex.Unop.arg);
-      addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(1),rm));
+      HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
+      addInstr(env, AMD64Instr_Test64(1,reg));
       return Acc_NZ;
    }
 
@@ -1969,7 +1965,7 @@ static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
    if (e->tag == Iex_Unop
        && e->Iex.Unop.op == Iop_CmpNEZ8) {
       HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
-      addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF),AMD64RM_Reg(r)));
+      addInstr(env, AMD64Instr_Test64(0xFF,r));
       return Acc_NZ;
    }
 
@@ -1979,7 +1975,7 @@ static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
    if (e->tag == Iex_Unop
        && e->Iex.Unop.op == Iop_CmpNEZ16) {
       HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
-      addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFFFF),AMD64RM_Reg(r)));
+      addInstr(env, AMD64Instr_Test64(0xFFFF,r));
       return Acc_NZ;
    }
 
@@ -2052,7 +2048,7 @@ static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
       HReg r = newVRegI(env);
      addInstr(env, mk_iMOVsd_RR(r1,r));
      addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
-     addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(r)));
+     addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, r));
      switch (e->Iex.Binop.op) {
         case Iop_CmpEQ32: return Acc_Z;
         case Iop_CmpNE32: return Acc_NZ;
@@ -2265,7 +2261,7 @@ static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
      AMD64RM* rmLeft = iselIntExpr_RM(env, e->Iex.Binop.arg1);
      HReg rRight = iselIntExpr_R(env, e->Iex.Binop.arg2);
      addInstr(env, mk_iMOVsd_RR(rRight, hregAMD64_RAX()));
-     addInstr(env, AMD64Instr_MulL(syned, 8, rmLeft));
+     addInstr(env, AMD64Instr_MulL(syned, rmLeft));
      /* Result is now in RDX:RAX.  Tell the caller. */
      addInstr(env, mk_iMOVsd_RR(hregAMD64_RDX(), tHi));
      addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), tLo));
@@ -2995,7 +2991,7 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
      r0  = iselDblExpr(env, e->Iex.Mux0X.expr0);
      dst = newVRegV(env);
      addInstr(env, mk_vMOVsd_RR(rX,dst));
-     addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+     addInstr(env, AMD64Instr_Test64(0xFF, r8));
      addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
      return dst;
   }
@@ -3475,7 +3471,7 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
      HReg r0  = iselVecExpr(env, e->Iex.Mux0X.expr0);
      HReg dst = newVRegV(env);
      addInstr(env, mk_vMOVsd_RR(rX,dst));
-     addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+     addInstr(env, AMD64Instr_Test64(0xFF, r8));
      addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
      return dst;
   }
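
As a rough illustrative sketch (not part of the patch, drawn only from the isel.c
fragments above), the narrowed constructors are now driven like this; dst and r8
stand for integer virtual registers obtained via newVRegI, and rmLeft is an
AMD64RM* as before:

   /* shifts and unary ops now take an HReg directly, not an AMD64RM* */
   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, dst));
   addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, dst));
   addInstr(env, AMD64Instr_Unary64(Aun_NEG, dst));
   /* test is always a sign-extended immediate against a register */
   addInstr(env, AMD64Instr_Test64(0xFF, r8));
   /* widening multiply is always 64x64 -> 128, so the size argument is gone */
   addInstr(env, AMD64Instr_MulL(syned, rmLeft));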