From: Florian Krohm
Date: Mon, 11 Feb 2013 00:47:35 +0000 (+0000)
Subject: Make HReg a struct.
X-Git-Tag: svn/VALGRIND_3_9_0^2~112
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a87a4d1bfd34ea62ec02951073cfb8e5abc4cbc4;p=thirdparty%2Fvalgrind.git

Make HReg a struct. In the past there were several occurrences where
a HReg was assigned to an integer. That worked by accident, because the
bits representing the register number (which were what was actually
meant to be accessed) happened to be in the right place. Two new
functions, hregIsInvalid and sameHReg, replace the former direct
comparisons against INVALID_HREG and between HRegs. The HReg struct
just wraps the integer that was previously used to represent a
register, without changing the encoding.

git-svn-id: svn://svn.valgrind.org/vex/trunk@2682
---

diff --git a/VEX/priv/host_amd64_defs.c b/VEX/priv/host_amd64_defs.c
index 39756b44be..e217cfef1f 100644
--- a/VEX/priv/host_amd64_defs.c
+++ b/VEX/priv/host_amd64_defs.c
@@ -1577,7 +1577,7 @@ void getRegUsage_AMD64Instr ( HRegUsage* u, AMD64Instr* i, Bool mode64 )
       case Ain_SseReRg:
          if ( (i->Ain.SseReRg.op == Asse_XOR
                || i->Ain.SseReRg.op == Asse_CMPEQ32)
-              && i->Ain.SseReRg.src == i->Ain.SseReRg.dst) {
+              && sameHReg(i->Ain.SseReRg.src, i->Ain.SseReRg.dst)) {
             /* reg-alloc needs to understand 'xor r,r' and 'cmpeqd
                r,r' as a write of a value to r, and independent of any
                previous value in r */
@@ -2048,43 +2048,43 @@ static UChar* doAMode_M ( UChar* p, HReg greg, AMD64AMode* am )
 {
    if (am->tag == Aam_IR) {
       if (am->Aam.IR.imm == 0
-          && am->Aam.IR.reg != hregAMD64_RSP()
-          && am->Aam.IR.reg != hregAMD64_RBP()
-          && am->Aam.IR.reg != hregAMD64_R12()
-          && am->Aam.IR.reg != hregAMD64_R13()
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_RBP())
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_R12())
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_R13())
          ) {
          *p++ = mkModRegRM(0, iregBits210(greg),
                               iregBits210(am->Aam.IR.reg));
          return p;
       }
       if (fits8bits(am->Aam.IR.imm)
-          && am->Aam.IR.reg != hregAMD64_RSP()
-          && am->Aam.IR.reg != hregAMD64_R12()
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_R12())
          ) {
          *p++ = mkModRegRM(1, iregBits210(greg),
                               iregBits210(am->Aam.IR.reg));
          *p++ = toUChar(am->Aam.IR.imm & 0xFF);
          return p;
       }
-      if (am->Aam.IR.reg != hregAMD64_RSP()
-          && am->Aam.IR.reg != hregAMD64_R12()
+      if (! sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+          && ! sameHReg(am->Aam.IR.reg, hregAMD64_R12())
          ) {
          *p++ = mkModRegRM(2, iregBits210(greg),
                               iregBits210(am->Aam.IR.reg));
          p = emit32(p, am->Aam.IR.imm);
          return p;
       }
-      if ((am->Aam.IR.reg == hregAMD64_RSP()
-           || am->Aam.IR.reg == hregAMD64_R12())
+      if ((sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+           || sameHReg(am->Aam.IR.reg, hregAMD64_R12()))
          && fits8bits(am->Aam.IR.imm)) {
          *p++ = mkModRegRM(1, iregBits210(greg), 4);
          *p++ = 0x24;
          *p++ = toUChar(am->Aam.IR.imm & 0xFF);
          return p;
       }
-      if (/* (am->Aam.IR.reg == hregAMD64_RSP()
+      if (/* (sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
             || wait for test case for RSP case */
-          am->Aam.IR.reg == hregAMD64_R12()) {
+          sameHReg(am->Aam.IR.reg, hregAMD64_R12())) {
          *p++ = mkModRegRM(2, iregBits210(greg), 4);
          *p++ = 0x24;
          p = emit32(p, am->Aam.IR.imm);
@@ -2096,14 +2096,14 @@ static UChar* doAMode_M ( UChar* p, HReg greg, AMD64AMode* am )
    }
    if (am->tag == Aam_IRRS) {
       if (fits8bits(am->Aam.IRRS.imm)
-          && am->Aam.IRRS.index != hregAMD64_RSP()) {
+          && ! sameHReg(am->Aam.IRRS.index, hregAMD64_RSP())) {
          *p++ = mkModRegRM(1, iregBits210(greg), 4);
          *p++ = mkSIB(am->Aam.IRRS.shift, iregBits210(am->Aam.IRRS.index),
                                           iregBits210(am->Aam.IRRS.base));
          *p++ = toUChar(am->Aam.IRRS.imm & 0xFF);
          return p;
       }
-      if (am->Aam.IRRS.index != hregAMD64_RSP()) {
+      if (! sameHReg(am->Aam.IRRS.index, hregAMD64_RSP())) {
          *p++ = mkModRegRM(2, iregBits210(greg), 4);
          *p++ = mkSIB(am->Aam.IRRS.shift, iregBits210(am->Aam.IRRS.index),
                                           iregBits210(am->Aam.IRRS.base));
@@ -2410,7 +2410,7 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
          }
          switch (i->Ain.Alu64R.src->tag) {
             case Armi_Imm:
-               if (i->Ain.Alu64R.dst == hregAMD64_RAX()
+               if (sameHReg(i->Ain.Alu64R.dst, hregAMD64_RAX())
                    && !fits8bits(i->Ain.Alu64R.src->Armi.Imm.imm32)) {
                   goto bad; /* FIXME: awaiting test case */
                   *p++ = toUChar(opc_imma);
@@ -2541,7 +2541,7 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
          }
          switch (i->Ain.Alu32R.src->tag) {
             case Armi_Imm:
-               if (i->Ain.Alu32R.dst == hregAMD64_RAX()
+               if (sameHReg(i->Ain.Alu32R.dst, hregAMD64_RAX())
                    && !fits8bits(i->Ain.Alu32R.src->Armi.Imm.imm32)) {
                   goto bad; /* FIXME: awaiting test case */
                   *p++ = toUChar(opc_imma);
diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c
index 59df608533..49a3773596 100644
--- a/VEX/priv/host_amd64_isel.c
+++ b/VEX/priv/host_amd64_isel.c
@@ -175,7 +175,7 @@ static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
 {
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
    *vrLO = env->vregmap[tmp];
    *vrHI = env->vregmapHI[tmp];
 }
@@ -263,7 +263,7 @@ static Bool sane_AMode ( AMD64AMode* am )
          return
            toBool( hregClass(am->Aam.IR.reg) == HRcInt64
                    && (hregIsVirtual(am->Aam.IR.reg)
-                       || am->Aam.IR.reg == hregAMD64_RBP()) );
+                       || sameHReg(am->Aam.IR.reg, hregAMD64_RBP())) );
       case Aam_IRRS:
          return
            toBool( hregClass(am->Aam.IRRS.base) == HRcInt64
diff --git a/VEX/priv/host_arm_isel.c b/VEX/priv/host_arm_isel.c
index c3217de764..22c4efb791 100644
--- a/VEX/priv/host_arm_isel.c
+++ b/VEX/priv/host_arm_isel.c
@@ -137,7 +137,7 @@ static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
 {
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
    *vrLO = env->vregmap[tmp];
    *vrHI = env->vregmapHI[tmp];
 }
@@ -581,7 +581,7 @@ Bool doHelperCall ( ISelEnv* env,
 
    /* Move the args to their final destinations. */
    for (i = 0; i < nextArgReg; i++) {
-      if (tmpregs[i] == INVALID_HREG) { // Skip invalid regs
+      if (hregIsInvalid(tmpregs[i])) { // Skip invalid regs
         addInstr(env, ARMInstr_Imm32( argregs[i], 0xAA ));
         continue;
      }
@@ -654,7 +654,7 @@ static Bool sane_AMode1 ( ARMAMode1* am )
          return
            toBool( hregClass(am->ARMam1.RI.reg) == HRcInt32
                    && (hregIsVirtual(am->ARMam1.RI.reg)
-                       || am->ARMam1.RI.reg == hregARM_R8())
+                       || sameHReg(am->ARMam1.RI.reg, hregARM_R8()))
                    && am->ARMam1.RI.simm13 >= -4095
                    && am->ARMam1.RI.simm13 <= 4095 );
       case ARMam1_RRS:
diff --git a/VEX/priv/host_generic_reg_alloc2.c b/VEX/priv/host_generic_reg_alloc2.c
index a5e382b6a5..cd77e6bfb9 100644
--- a/VEX/priv/host_generic_reg_alloc2.c
+++ b/VEX/priv/host_generic_reg_alloc2.c
@@ -154,7 +154,7 @@ static Bool instrMentionsReg (
    HRegUsage reg_usage;
    (*getRegUsage)(&reg_usage, instr, mode64);
    for (i = 0; i < reg_usage.n_used; i++)
-      if (reg_usage.hreg[i] == r)
+      if (sameHReg(reg_usage.hreg[i], r))
         return True;
    return False;
 }
@@ -621,7 +621,7 @@ HInstrArray* doRegisterAllocation (
          stack pointer register, or some other register beyond our
          control, in which case we should just ignore it. */
       for (k = 0; k < n_available_real_regs; k++)
-         if (available_real_regs[k] == rreg)
+         if (sameHReg(available_real_regs[k], rreg))
            break;
       if (k == n_available_real_regs)
          continue; /* not found -- ignore. */
@@ -729,7 +729,7 @@ HInstrArray* doRegisterAllocation (
       /* rreg is involved in a HLR.  Record this info in the array, if
          there is space. */
       for (k = 0; k < n_rregs; k++)
-         if (rreg_state[k].rreg == rreg)
+         if (sameHReg(rreg_state[k].rreg, rreg))
            break;
       vassert(k < n_rregs); /* else rreg was not found in rreg_state?! */
       rreg_state[k].has_hlrs = True;
@@ -958,7 +958,7 @@ HInstrArray* doRegisterAllocation (
 
          /* find the state entry for this rreg */
          for (k = 0; k < n_rregs; k++)
-            if (rreg_state[k].rreg == rreg_lrs_la[j].rreg)
+            if (sameHReg(rreg_state[k].rreg, rreg_lrs_la[j].rreg))
               break;
 
          /* and assert that this rreg is marked as unavailable */
@@ -977,7 +977,7 @@ HInstrArray* doRegisterAllocation (
         if (rreg_state[j].disp != Unavail)
            continue;
         for (k = 0; k < rreg_lrs_used; k++)
-           if (rreg_lrs_la[k].rreg == rreg_state[j].rreg
+           if (sameHReg(rreg_lrs_la[k].rreg, rreg_state[j].rreg)
                && rreg_lrs_la[k].live_after < ii
                && ii < rreg_lrs_la[k].dead_before)
              break;
@@ -1052,7 +1052,8 @@ HInstrArray* doRegisterAllocation (
 #        endif
          /* Find the state entry for vregS. */
          for (m = 0; m < n_rregs; m++)
-            if (rreg_state[m].disp == Bound && rreg_state[m].vreg == vregS)
+            if (rreg_state[m].disp == Bound
+                && sameHReg(rreg_state[m].vreg, vregS))
               break;
 
          if (m == n_rregs)
            /* We failed to find a binding for vregS, which means it's
@@ -1142,7 +1143,7 @@ HInstrArray* doRegisterAllocation (
            vex_printf("\n\n");
 #        endif
          for (k = 0; k < n_rregs; k++)
-           if (rreg_state[k].rreg == rreg_lrs_la[rreg_lrs_la_next].rreg)
+           if (sameHReg(rreg_state[k].rreg, rreg_lrs_la[rreg_lrs_la_next].rreg))
              break;
          /* If this fails, we don't have an entry for this rreg.
            Which we should. */
@@ -1231,7 +1232,7 @@ HInstrArray* doRegisterAllocation (
                  /* ok, it is spilled.  Now, is this its last use? */
                  vassert(vreg_lrs[m].dead_before >= ii+1);
                  if (vreg_lrs[m].dead_before == ii+1
-                     && cand == INVALID_HREG) {
+                     && hregIsInvalid(cand)) {
                     spilloff = vreg_lrs[m].spill_offset;
                     cand = vreg;
                  }
@@ -1239,10 +1240,10 @@ HInstrArray* doRegisterAllocation (
            }
         }
 
-         if (nreads == 1 && cand != INVALID_HREG) {
+         if (nreads == 1 && ! hregIsInvalid(cand)) {
            HInstr* reloaded;
            if (reg_usage.n_used == 2)
-               vassert(reg_usage.hreg[0] != reg_usage.hreg[1]);
+               vassert(! sameHReg(reg_usage.hreg[0], reg_usage.hreg[1]));
 
            reloaded = directReload ( instrs_in->arr[ii], cand, spilloff );
            if (debug_direct_reload && !reloaded) {
@@ -1378,7 +1379,7 @@ HInstrArray* doRegisterAllocation (
               continue;
            rreg_state[k].is_spill_cand = True;
            for (m = 0; m < reg_usage.n_used; m++) {
-              if (rreg_state[k].vreg == reg_usage.hreg[m]) {
+              if (sameHReg(rreg_state[k].vreg, reg_usage.hreg[m])) {
                  rreg_state[k].is_spill_cand = False;
                  break;
               }
@@ -1410,7 +1411,7 @@ HInstrArray* doRegisterAllocation (
         vassert(hregClass(rreg_state[spillee].rreg) == hregClass(vreg));
         /* check we're not ejecting the vreg for which we are trying
            to free up a register. */
-        vassert(rreg_state[spillee].vreg != vreg);
+        vassert(! sameHReg(rreg_state[spillee].vreg, vreg));
 
         m = hregNumber(rreg_state[spillee].vreg);
         vassert(IS_VALID_VREGNO(m));
@@ -1508,7 +1509,7 @@ HInstrArray* doRegisterAllocation (
         /* rreg_lrs_db[rreg_lrs_db_next].rreg is exiting a hard live
            range.  Mark it as such in the main rreg_state array. */
         for (k = 0; k < n_rregs; k++)
-           if (rreg_state[k].rreg == rreg_lrs_db[rreg_lrs_db_next].rreg)
+           if (sameHReg(rreg_state[k].rreg, rreg_lrs_db[rreg_lrs_db_next].rreg))
              break;
         /* If this vassertion fails, we don't have an entry for this
            rreg.  Which we should. */
@@ -1538,7 +1539,7 @@ HInstrArray* doRegisterAllocation (
 
    /* Paranoia */
    for (j = 0; j < n_rregs; j++)
-      vassert(rreg_state[j].rreg == available_real_regs[j]);
+      vassert(sameHReg(rreg_state[j].rreg, available_real_regs[j]));
 
    vassert(rreg_lrs_la_next == rreg_lrs_used);
    vassert(rreg_lrs_db_next == rreg_lrs_used);
diff --git a/VEX/priv/host_generic_regs.c b/VEX/priv/host_generic_regs.c
index dfaf06a15b..7e1a1b2fbb 100644
--- a/VEX/priv/host_generic_regs.c
+++ b/VEX/priv/host_generic_regs.c
@@ -103,7 +103,7 @@ void addHRegUse ( HRegUsage* tab, HRegMode mode, HReg reg )
    Int i;
    /* Find it ... */
    for (i = 0; i < tab->n_used; i++)
-      if (tab->hreg[i] == reg)
+      if (sameHReg(tab->hreg[i], reg))
         break;
    if (i == tab->n_used) {
       /* Not found, add new entry. */
@@ -161,7 +161,7 @@ void addToHRegRemap ( HRegRemap* map, HReg orig, HReg replacement )
 {
    Int i;
    for (i = 0; i < map->n_used; i++)
-      if (map->orig[i] == orig)
+      if (sameHReg(map->orig[i], orig))
         vpanic("addToHRegMap: duplicate entry");
    if (!hregIsVirtual(orig))
       vpanic("addToHRegMap: orig is not a vreg");
@@ -181,7 +181,7 @@ HReg lookupHRegRemap ( HRegRemap* map, HReg orig )
    if (!hregIsVirtual(orig))
       return orig;
    for (i = 0; i < map->n_used; i++)
-      if (map->orig[i] == orig)
+      if (sameHReg(map->orig[i], orig))
         return map->replacement[i];
    vpanic("lookupHRegRemap: not found");
 }
diff --git a/VEX/priv/host_generic_regs.h b/VEX/priv/host_generic_regs.h
index 7f4153e19a..39bd97f59e 100644
--- a/VEX/priv/host_generic_regs.h
+++ b/VEX/priv/host_generic_regs.h
@@ -68,7 +68,11 @@
      int32 int64 float32 float64 simd64 simd128
 */
 
-typedef UInt HReg;
+typedef
+   struct {
+      UInt reg;
+   }
+   HReg;
 
 /* When extending this, do not use any value > 14 or < 0. */
 /* HRegClass describes host register classes which the instruction
@@ -116,29 +120,37 @@ static inline HReg mkHReg ( UInt regno, HRegClass rc, Bool virtual ) {
       occupy 24 bits. */
    if (r24 != regno)
       vpanic("mkHReg: regno exceeds 2^24");
-   return regno | (((UInt)rc) << 28) | (virtual ? (1<<24) : 0);
+   HReg r;
+   r.reg = regno | (((UInt)rc) << 28) | (virtual ? (1<<24) : 0);
+   return r;
 }
 
 static inline HRegClass hregClass ( HReg r ) {
-   UInt rc = r;
+   UInt rc = r.reg;
    rc = (rc >> 28) & 0x0F;
    vassert(rc >= HRcInt32 && rc <= HRcVec128);
    return (HRegClass)rc;
 }
 
 static inline UInt hregNumber ( HReg r ) {
-   return ((UInt)r) & 0x00FFFFFF;
+   return r.reg & 0x00FFFFFF;
 }
 
 static inline Bool hregIsVirtual ( HReg r ) {
-   return toBool(((UInt)r) & (1<<24));
+   return toBool(r.reg & (1<<24));
 }
 
+static inline Bool sameHReg ( HReg r1, HReg r2 )
+{
+   return toBool(r1.reg == r2.reg);
+}
 
+static const HReg INVALID_HREG = { 0xFFFFFFFF };
 
-#define INVALID_HREG ((HReg)0xFFFFFFFF)
-
+static inline Bool hregIsInvalid ( HReg r )
+{
+   return sameHReg(r, INVALID_HREG);
+}
 
 /*---------------------------------------------------------*/
 /*--- Recording register usage (for reg-alloc)           ---*/
diff --git a/VEX/priv/host_mips_isel.c b/VEX/priv/host_mips_isel.c
index 9d56cd7971..ef38c16520 100644
--- a/VEX/priv/host_mips_isel.c
+++ b/VEX/priv/host_mips_isel.c
@@ -137,7 +137,7 @@ static void lookupIRTemp64(HReg * vrHI, HReg * vrLO, ISelEnv * env, IRTemp tmp)
 {
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
    *vrLO = env->vregmap[tmp];
    *vrHI = env->vregmapHI[tmp];
 }
@@ -148,7 +148,7 @@ lookupIRTempPair(HReg * vrHI, HReg * vrLO, ISelEnv * env, IRTemp tmp)
    vassert(env->mode64);
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
    *vrLO = env->vregmap[tmp];
    *vrHI = env->vregmapHI[tmp];
 }
@@ -512,7 +512,7 @@ static void doHelperCall(ISelEnv * env, Bool passBBP, IRExpr * guard,
    }
    /* Move the args to their final destinations. */
    for (i = 0; i < argreg; i++) {
-      if (tmpregs[i] == INVALID_HREG)  // Skip invalid regs
+      if (hregIsInvalid(tmpregs[i]))  // Skip invalid regs
         continue;
      /* None of these insns, including any spill code that might
         be generated, may alter the condition codes. */
diff --git a/VEX/priv/host_ppc_defs.c b/VEX/priv/host_ppc_defs.c
index 184620f02d..28dbac2cc4 100644
--- a/VEX/priv/host_ppc_defs.c
+++ b/VEX/priv/host_ppc_defs.c
@@ -1451,7 +1451,7 @@ void ppPPCInstr ( PPCInstr* i, Bool mode64 )
       /* special-case "mr" */
       if (i->Pin.Alu.op == Palu_OR &&   // or Rd,Rs,Rs == mr Rd,Rs
          rh_srcR->tag == Prh_Reg &&
-         rh_srcR->Prh.Reg.reg == r_srcL) {
+         sameHReg(rh_srcR->Prh.Reg.reg, r_srcL)) {
          vex_printf("mr ");
          ppHRegPPC(i->Pin.Alu.dst);
          vex_printf(",");
@@ -2338,8 +2338,8 @@ void getRegUsage_PPCInstr ( HRegUsage* u, PPCInstr* i, Bool mode64 )
       return;
    case Pin_AvBinary:
       if (i->Pin.AvBinary.op == Pav_XOR
-          && i->Pin.AvBinary.dst == i->Pin.AvBinary.srcL
-          && i->Pin.AvBinary.dst == i->Pin.AvBinary.srcR) {
+          && sameHReg(i->Pin.AvBinary.dst, i->Pin.AvBinary.srcL)
+          && sameHReg(i->Pin.AvBinary.dst, i->Pin.AvBinary.srcR)) {
          /* reg-alloc needs to understand 'xor r,r,r' as a write of r */
          /* (as opposed to a rite of passage :-) */
          addHRegUse(u, HRmWrite, i->Pin.AvBinary.dst);
@@ -2826,7 +2826,7 @@ Bool isMove_PPCInstr ( PPCInstr* i, HReg* src, HReg* dst )
         return False;
      if (i->Pin.Alu.srcR->tag != Prh_Reg)
         return False;
-      if (i->Pin.Alu.srcR->Prh.Reg.reg != i->Pin.Alu.srcL)
+      if (! sameHReg(i->Pin.Alu.srcR->Prh.Reg.reg, i->Pin.Alu.srcL))
         return False;
      *src = i->Pin.Alu.srcL;
      *dst = i->Pin.Alu.dst;
diff --git a/VEX/priv/host_ppc_isel.c b/VEX/priv/host_ppc_isel.c
index 992acb5287..d52e3740b5 100644
--- a/VEX/priv/host_ppc_isel.c
+++ b/VEX/priv/host_ppc_isel.c
@@ -311,7 +311,7 @@ static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
 {
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
    *vrLO = env->vregmapLo[tmp];
    *vrHI = env->vregmapMedLo[tmp];
 }
@@ -323,7 +323,7 @@ static void lookupIRTempQuad ( HReg* vrHi, HReg* vrMedHi, HReg* vrMedLo,
    vassert(!env->mode64);
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
    *vrHi    = env->vregmapHi[tmp];
    *vrMedHi = env->vregmapMedHi[tmp];
    *vrMedLo = env->vregmapMedLo[tmp];
@@ -897,7 +897,7 @@ void doHelperCall ( ISelEnv* env,
 
    /* Move the args to their final destinations. */
    for (i = 0; i < argreg; i++) {
-      if (tmpregs[i] == INVALID_HREG)  // Skip invalid regs
+      if (hregIsInvalid(tmpregs[i]))  // Skip invalid regs
         continue;
      /* None of these insns, including any spill code that might
         be generated, may alter the condition codes. */
diff --git a/VEX/priv/host_s390_defs.c b/VEX/priv/host_s390_defs.c
index 26988d80b7..ff773e01a8 100644
--- a/VEX/priv/host_s390_defs.c
+++ b/VEX/priv/host_s390_defs.c
@@ -646,7 +646,7 @@ s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *insn)
       for (i = 1; i <= 5; ++i) {
          addHRegUse(u, HRmWrite, mkHReg(i, HRcInt64, False));
       }
-      if (insn->variant.helper_call.dst != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.helper_call.dst))
         addHRegUse(u, HRmWrite, insn->variant.helper_call.dst);
 
      /* Ditto for floating point registers. f0 - f7 are volatile */
@@ -706,10 +706,10 @@ s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *insn)
 
    case S390_INSN_BFP_CONVERT:
       addHRegUse(u, HRmWrite, insn->variant.bfp_convert.dst_hi);
-      if (insn->variant.bfp_convert.dst_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.bfp_convert.dst_lo))
         addHRegUse(u, HRmWrite, insn->variant.bfp_convert.dst_lo);
      addHRegUse(u, HRmRead,  insn->variant.bfp_convert.op_hi);
-      if (insn->variant.bfp_convert.op_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.bfp_convert.op_lo))
         addHRegUse(u, HRmRead, insn->variant.bfp_convert.op_lo);
      break;
@@ -758,10 +758,10 @@ s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *insn)
 
    case S390_INSN_DFP_CONVERT:
       addHRegUse(u, HRmWrite, insn->variant.dfp_convert.dst_hi);
-      if (insn->variant.dfp_convert.dst_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.dfp_convert.dst_lo))
         addHRegUse(u, HRmWrite, insn->variant.dfp_convert.dst_lo);
      addHRegUse(u, HRmRead,  insn->variant.dfp_convert.op_hi);   /* operand */
-      if (insn->variant.dfp_convert.op_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.dfp_convert.op_lo))
         addHRegUse(u, HRmRead, insn->variant.dfp_convert.op_lo); /* operand */
      break;
@@ -946,7 +946,7 @@ s390_insn_map_regs(HRegRemap *m, s390_insn *insn)
          As for the arguments of the helper call -- they will be loaded into
          non-virtual registers. Again, we don't need to do anything for those
          here. */
-      if (insn->variant.helper_call.dst != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.helper_call.dst))
          insn->variant.helper_call.dst =
            lookupHRegRemap(m, insn->variant.helper_call.dst);
      break;
@@ -1003,12 +1003,12 @@ s390_insn_map_regs(HRegRemap *m, s390_insn *insn)
    case S390_INSN_BFP_CONVERT:
       insn->variant.bfp_convert.dst_hi =
          lookupHRegRemap(m, insn->variant.bfp_convert.dst_hi);
-      if (insn->variant.bfp_convert.dst_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.bfp_convert.dst_lo))
         insn->variant.bfp_convert.dst_lo =
            lookupHRegRemap(m, insn->variant.bfp_convert.dst_lo);
      insn->variant.bfp_convert.op_hi =
         lookupHRegRemap(m, insn->variant.bfp_convert.op_hi);
-      if (insn->variant.bfp_convert.op_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.bfp_convert.op_lo))
         insn->variant.bfp_convert.op_lo =
            lookupHRegRemap(m, insn->variant.bfp_convert.op_lo);
      break;
@@ -1073,12 +1073,12 @@ s390_insn_map_regs(HRegRemap *m, s390_insn *insn)
    case S390_INSN_DFP_CONVERT:
       insn->variant.dfp_convert.dst_hi =
          lookupHRegRemap(m, insn->variant.dfp_convert.dst_hi);
-      if (insn->variant.dfp_convert.dst_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.dfp_convert.dst_lo))
         insn->variant.dfp_convert.dst_lo =
            lookupHRegRemap(m, insn->variant.dfp_convert.dst_lo);
      insn->variant.dfp_convert.op_hi =
         lookupHRegRemap(m, insn->variant.dfp_convert.op_hi);
-      if (insn->variant.dfp_convert.op_lo != INVALID_HREG)
+      if (! hregIsInvalid(insn->variant.dfp_convert.op_lo))
         insn->variant.dfp_convert.op_lo =
            lookupHRegRemap(m, insn->variant.dfp_convert.op_lo);
      break;
@@ -5716,11 +5716,11 @@ s390_insn_bfp128_convert(UChar size, s390_bfp_conv_t tag, HReg dst_hi,
    if (size == 16) {
       /* From smaller size to 16 bytes */
       vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
-      vassert(op_lo == INVALID_HREG);
+      vassert(hregIsInvalid(op_lo));
    } else {
       /* From 16 bytes to smaller size */
       vassert(is_valid_fp128_regpair(op_hi, op_lo));
-      vassert(dst_lo == INVALID_HREG);
+      vassert(hregIsInvalid(dst_lo));
    }
 
    insn->tag = S390_INSN_BFP_CONVERT;
@@ -5975,11 +5975,11 @@ s390_insn_dfp128_convert(UChar size, s390_dfp_conv_t tag, HReg dst_hi,
    if (size == 16) {
       /* From smaller size to 16 bytes */
       vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
-      vassert(op_lo == INVALID_HREG);
+      vassert(hregIsInvalid(op_lo));
    } else {
       /* From 16 bytes to smaller size */
       vassert(is_valid_fp128_regpair(op_hi, op_lo));
-      vassert(dst_lo == INVALID_HREG);
+      vassert(hregIsInvalid(dst_lo));
    }
 
    insn->tag = S390_INSN_DFP_CONVERT;
@@ -6500,7 +6500,7 @@ s390_insn_as_string(const s390_insn *insn)
       break;
 
    case S390_INSN_HELPER_CALL: {
-      if (insn->variant.helper_call.dst != INVALID_HREG) {
+      if (! hregIsInvalid(insn->variant.helper_call.dst)) {
          s390_sprintf(buf, "%M if (%C) %R = %s{%I}(%L)", "v-call",
                       insn->variant.helper_call.cond,
                       insn->variant.helper_call.dst,
@@ -8434,7 +8434,7 @@ s390_insn_helper_call_emit(UChar *buf, const s390_insn *insn)
    target = insn->variant.helper_call.target;
 
    if (cond != S390_CC_ALWAYS
-       && insn->variant.helper_call.dst != INVALID_HREG) {
+       && ! hregIsInvalid(insn->variant.helper_call.dst)) {
       /* The call might not happen (it isn't unconditional) and it
          returns a result. In this case we will need to generate a
         control flow diamond to put 0x555..555 in the return
@@ -8472,7 +8472,7 @@ s390_insn_helper_call_emit(UChar *buf, const s390_insn *insn)
    buf = s390_emit_BASR(buf, S390_REGNO_LINK_REGISTER, 1);  // call helper
 
    /* Move the return value to the destination register */
-   if (insn->variant.helper_call.dst != INVALID_HREG) {
+   if (! hregIsInvalid(insn->variant.helper_call.dst)) {
      buf = s390_emit_LGR(buf, hregNumber(insn->variant.helper_call.dst),
                          S390_REGNO_RETURN_VALUE);
   }
diff --git a/VEX/priv/host_s390_isel.c b/VEX/priv/host_s390_isel.c
index cfca458c3a..1203459920 100644
--- a/VEX/priv/host_s390_isel.c
+++ b/VEX/priv/host_s390_isel.c
@@ -196,7 +196,7 @@ static HReg
 lookupIRTemp(ISelEnv *env, IRTemp tmp)
 {
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmap[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmap[tmp]));
 
    return env->vregmap[tmp];
 }
@@ -207,7 +207,7 @@ static void
 lookupIRTemp128(HReg *hi, HReg *lo, ISelEnv *env, IRTemp tmp)
 {
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
 
    *lo = env->vregmap[tmp];
    *hi = env->vregmapHI[tmp];
diff --git a/VEX/priv/host_x86_defs.c b/VEX/priv/host_x86_defs.c
index a01a3deb70..8d17f0ef45 100644
--- a/VEX/priv/host_x86_defs.c
+++ b/VEX/priv/host_x86_defs.c
@@ -1455,7 +1455,7 @@ void getRegUsage_X86Instr (HRegUsage* u, X86Instr* i, Bool mode64)
          return;
       case Xin_SseReRg:
          if (i->Xin.SseReRg.op == Xsse_XOR
-             && i->Xin.SseReRg.src == i->Xin.SseReRg.dst) {
+             && sameHReg(i->Xin.SseReRg.src, i->Xin.SseReRg.dst)) {
            /* reg-alloc needs to understand 'xor r,r' as a write of r */
            /* (as opposed to a rite of passage :-) */
            addHRegUse(u, HRmWrite, i->Xin.SseReRg.dst);
@@ -1768,8 +1768,8 @@ X86Instr* directReload_X86( X86Instr* i, HReg vreg, Short spill_off )
        && (i->Xin.Alu32R.op == Xalu_MOV || i->Xin.Alu32R.op == Xalu_OR
            || i->Xin.Alu32R.op == Xalu_XOR)
        && i->Xin.Alu32R.src->tag == Xrmi_Reg
-       && i->Xin.Alu32R.src->Xrmi.Reg.reg == vreg) {
-      vassert(i->Xin.Alu32R.dst != vreg);
+       && sameHReg(i->Xin.Alu32R.src->Xrmi.Reg.reg, vreg)) {
+      vassert(! sameHReg(i->Xin.Alu32R.dst, vreg));
       return X86Instr_Alu32R(
                 i->Xin.Alu32R.op,
                 X86RMI_Mem( X86AMode_IR( spill_off, hregX86_EBP())),
@@ -1783,7 +1783,7 @@ X86Instr* directReload_X86( X86Instr* i, HReg vreg, Short spill_off )
    if (i->tag == Xin_Alu32R
        && (i->Xin.Alu32R.op == Xalu_CMP)
        && i->Xin.Alu32R.src->tag == Xrmi_Imm
-       && i->Xin.Alu32R.dst == vreg) {
+       && sameHReg(i->Xin.Alu32R.dst, vreg)) {
       return X86Instr_Alu32M(
                 i->Xin.Alu32R.op,
                 X86RI_Imm( i->Xin.Alu32R.src->Xrmi.Imm.imm32 ),
@@ -1796,7 +1796,7 @@ X86Instr* directReload_X86( X86Instr* i, HReg vreg, Short spill_off )
    */
    if (i->tag == Xin_Push
        && i->Xin.Push.src->tag == Xrmi_Reg
-       && i->Xin.Push.src->Xrmi.Reg.reg == vreg) {
+       && sameHReg(i->Xin.Push.src->Xrmi.Reg.reg, vreg)) {
       return X86Instr_Push(
                 X86RMI_Mem( X86AMode_IR( spill_off, hregX86_EBP()))
              );
@@ -1806,8 +1806,8 @@ X86Instr* directReload_X86( X86Instr* i, HReg vreg, Short spill_off )
       Convert to CMov32(RM_Mem, dst) */
    if (i->tag == Xin_CMov32
        && i->Xin.CMov32.src->tag == Xrm_Reg
-       && i->Xin.CMov32.src->Xrm.Reg.reg == vreg) {
-      vassert(i->Xin.CMov32.dst != vreg);
+       && sameHReg(i->Xin.CMov32.src->Xrm.Reg.reg, vreg)) {
+      vassert(! sameHReg(i->Xin.CMov32.dst, vreg));
       return X86Instr_CMov32(
                 i->Xin.CMov32.cond,
                 X86RM_Mem( X86AMode_IR( spill_off, hregX86_EBP() )),
@@ -1818,7 +1818,7 @@ X86Instr* directReload_X86( X86Instr* i, HReg vreg, Short spill_off )
    /* Deal with form: Test32(imm,RM_Reg vreg) -> Test32(imm,amode) */
    if (i->tag == Xin_Test32
        && i->Xin.Test32.dst->tag == Xrm_Reg
-       && i->Xin.Test32.dst->Xrm.Reg.reg == vreg) {
+       && sameHReg(i->Xin.Test32.dst->Xrm.Reg.reg, vreg)) {
       return X86Instr_Test32(
                 i->Xin.Test32.imm32,
                 X86RM_Mem( X86AMode_IR( spill_off, hregX86_EBP() ) )
@@ -1924,23 +1924,23 @@ static UChar* doAMode_M ( UChar* p, HReg greg, X86AMode* am )
 {
    if (am->tag == Xam_IR) {
       if (am->Xam.IR.imm == 0
-          && am->Xam.IR.reg != hregX86_ESP()
-          && am->Xam.IR.reg != hregX86_EBP() ) {
+          && ! sameHReg(am->Xam.IR.reg, hregX86_ESP())
+          && ! sameHReg(am->Xam.IR.reg, hregX86_EBP()) ) {
          *p++ = mkModRegRM(0, iregNo(greg), iregNo(am->Xam.IR.reg));
          return p;
       }
       if (fits8bits(am->Xam.IR.imm)
-          && am->Xam.IR.reg != hregX86_ESP()) {
+          && ! sameHReg(am->Xam.IR.reg, hregX86_ESP())) {
          *p++ = mkModRegRM(1, iregNo(greg), iregNo(am->Xam.IR.reg));
         *p++ = toUChar(am->Xam.IR.imm & 0xFF);
         return p;
      }
-      if (am->Xam.IR.reg != hregX86_ESP()) {
+      if (! sameHReg(am->Xam.IR.reg, hregX86_ESP())) {
         *p++ = mkModRegRM(2, iregNo(greg), iregNo(am->Xam.IR.reg));
         p = emit32(p, am->Xam.IR.imm);
         return p;
      }
-      if (am->Xam.IR.reg == hregX86_ESP()
+      if (sameHReg(am->Xam.IR.reg, hregX86_ESP())
          && fits8bits(am->Xam.IR.imm)) {
         *p++ = mkModRegRM(1, iregNo(greg), 4);
         *p++ = 0x24;
@@ -1953,14 +1953,14 @@ static UChar* doAMode_M ( UChar* p, HReg greg, X86AMode* am )
    }
    if (am->tag == Xam_IRRS) {
       if (fits8bits(am->Xam.IRRS.imm)
-          && am->Xam.IRRS.index != hregX86_ESP()) {
+          && ! sameHReg(am->Xam.IRRS.index, hregX86_ESP())) {
         *p++ = mkModRegRM(1, iregNo(greg), 4);
         *p++ = mkSIB(am->Xam.IRRS.shift, iregNo(am->Xam.IRRS.index),
                                          iregNo(am->Xam.IRRS.base));
         *p++ = toUChar(am->Xam.IRRS.imm & 0xFF);
         return p;
      }
-      if (am->Xam.IRRS.index != hregX86_ESP()) {
+      if (! sameHReg(am->Xam.IRRS.index, hregX86_ESP())) {
         *p++ = mkModRegRM(2, iregNo(greg), 4);
         *p++ = mkSIB(am->Xam.IRRS.shift, iregNo(am->Xam.IRRS.index),
                                          iregNo(am->Xam.IRRS.base));
@@ -2185,7 +2185,7 @@ Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
          }
          switch (i->Xin.Alu32R.src->tag) {
             case Xrmi_Imm:
-               if (i->Xin.Alu32R.dst == hregX86_EAX()
+               if (sameHReg(i->Xin.Alu32R.dst, hregX86_EAX())
                    && !fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
                   *p++ = toUChar(opc_imma);
                   p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
@@ -2776,16 +2776,16 @@ Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
             addRegUsage_X86AMode(&u, i->Xin.Store.dst);
             for (j = 0; j < u.n_used; j++) {
                HReg r = u.hreg[j];
-               if (r == eax) a_ok = False;
-               if (r == ebx) b_ok = False;
-               if (r == ecx) c_ok = False;
-               if (r == edx) d_ok = False;
+               if (sameHReg(r, eax)) a_ok = False;
+               if (sameHReg(r, ebx)) b_ok = False;
+               if (sameHReg(r, ecx)) c_ok = False;
+               if (sameHReg(r, edx)) d_ok = False;
             }
             if (a_ok) swap = eax;
             if (b_ok) swap = ebx;
             if (c_ok) swap = ecx;
             if (d_ok) swap = edx;
-            vassert(swap != INVALID_HREG);
+            vassert(! hregIsInvalid(swap));
             /* xchgl %source, %swap. Could do better if swap is %eax. */
             *p++ = 0x87;
             p = doAMode_R(p, i->Xin.Store.src, swap);
diff --git a/VEX/priv/host_x86_isel.c b/VEX/priv/host_x86_isel.c
index e59af20572..4ef69716c9 100644
--- a/VEX/priv/host_x86_isel.c
+++ b/VEX/priv/host_x86_isel.c
@@ -201,7 +201,7 @@ static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
 {
    vassert(tmp >= 0);
    vassert(tmp < env->n_vregmap);
-   vassert(env->vregmapHI[tmp] != INVALID_HREG);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
    *vrLO = env->vregmap[tmp];
    *vrHI = env->vregmapHI[tmp];
 }
@@ -1431,7 +1431,7 @@ static Bool sane_AMode ( X86AMode* am )
          return
            toBool( hregClass(am->Xam.IR.reg) == HRcInt32
                    && (hregIsVirtual(am->Xam.IR.reg)
-                       || am->Xam.IR.reg == hregX86_EBP()) );
+                       || sameHReg(am->Xam.IR.reg, hregX86_EBP())) );
       case Xam_IRRS:
          return
            toBool( hregClass(am->Xam.IRRS.base) == HRcInt32
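
As an illustration of the bug class this change closes (not part of the
patch itself): with the old typedef, an HReg converted silently to and
from integers, so register/integer mixups compiled and could even appear
to work because the register number sits in the low bits of the encoding.
The sketch below is simplified from host_generic_regs.h, with plain C
types standing in for VEX's UInt/Bool; the encoding is unchanged, but a
struct takes part in no arithmetic conversions, so the mixups become
compile-time errors.

#include <stdio.h>

/* Simplified from host_generic_regs.h: the same 32-bit encoding
   (register class in bits 31:28, virtual flag in bit 24, register
   number in bits 23:0), wrapped in a one-member struct. */
typedef struct { unsigned int reg; } HReg;

static HReg mkHReg(unsigned int regno, unsigned int rc, int virtual)
{
   HReg r;
   r.reg = regno | (rc << 28) | (virtual ? (1u << 24) : 0);
   return r;
}

static int sameHReg(HReg r1, HReg r2)
{
   return r1.reg == r2.reg;
}

int main(void)
{
   HReg r = mkHReg(5, 1, 0);

   /* With the old 'typedef UInt HReg', mixups like

         unsigned int n = r;    // HReg silently becomes an integer
         if (r == 5) ...        // encoded HReg compared to a regno

      compiled without complaint and sometimes even behaved, since the
      register number lives in the low bits of the encoding.  With the
      struct, both lines are compile-time errors, and register equality
      has to be spelled out: */
   printf("%d\n", sameHReg(r, mkHReg(5, 1, 0)));   /* prints 1 */
   return 0;
}

Under common ABIs a one-member struct is passed around just like the bare
integer it wraps, so the wrapper is a compile-time-only change, which is
presumably why the commit can leave the encoding untouched.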