case Ain_SseReRg:
if ( (i->Ain.SseReRg.op == Asse_XOR
|| i->Ain.SseReRg.op == Asse_CMPEQ32)
- && i->Ain.SseReRg.src == i->Ain.SseReRg.dst) {
+ && sameHReg(i->Ain.SseReRg.src, i->Ain.SseReRg.dst)) {
/* reg-alloc needs to understand 'xor r,r' and 'cmpeqd
r,r' as a write of a value to r, and independent of any
previous value in r */
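The point of special-casing these forms is register-usage classification: when src and dst are the same register, the result does not depend on the register's previous contents, so the allocator should see a plain write rather than a read-modify. A sketch of how that classification typically looks, modelled on the addHRegUse/HRmWrite calls in the PPC and x86 hunks further down; the HRmRead/HRmModify else-branch is an assumption about the usual three-way split, not part of this patch:

if (/* same-register XOR / CMPEQ32 case, as above */) {
   /* Result is independent of the old value: report a pure write, so
      the allocator need not keep any prior value of dst alive.  'u' is
      the HRegUsage accumulator passed to getRegUsage. */
   addHRegUse(u, HRmWrite, i->Ain.SseReRg.dst);
} else {
   /* Ordinary case: src is read, dst is read and then written. */
   addHRegUse(u, HRmRead,   i->Ain.SseReRg.src);
   addHRegUse(u, HRmModify, i->Ain.SseReRg.dst);
}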
{
if (am->tag == Aam_IR) {
if (am->Aam.IR.imm == 0
- && am->Aam.IR.reg != hregAMD64_RSP()
- && am->Aam.IR.reg != hregAMD64_RBP()
- && am->Aam.IR.reg != hregAMD64_R12()
- && am->Aam.IR.reg != hregAMD64_R13()
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_RBP())
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_R12())
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_R13())
) {
*p++ = mkModRegRM(0, iregBits210(greg),
iregBits210(am->Aam.IR.reg));
return p;
}
if (fits8bits(am->Aam.IR.imm)
- && am->Aam.IR.reg != hregAMD64_RSP()
- && am->Aam.IR.reg != hregAMD64_R12()
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_R12())
) {
*p++ = mkModRegRM(1, iregBits210(greg),
iregBits210(am->Aam.IR.reg));
*p++ = toUChar(am->Aam.IR.imm & 0xFF);
return p;
}
- if (am->Aam.IR.reg != hregAMD64_RSP()
- && am->Aam.IR.reg != hregAMD64_R12()
+ if (! sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+ && ! sameHReg(am->Aam.IR.reg, hregAMD64_R12())
) {
*p++ = mkModRegRM(2, iregBits210(greg),
iregBits210(am->Aam.IR.reg));
p = emit32(p, am->Aam.IR.imm);
return p;
}
- if ((am->Aam.IR.reg == hregAMD64_RSP()
- || am->Aam.IR.reg == hregAMD64_R12())
+ if ((sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
+ || sameHReg(am->Aam.IR.reg, hregAMD64_R12()))
&& fits8bits(am->Aam.IR.imm)) {
*p++ = mkModRegRM(1, iregBits210(greg), 4);
*p++ = 0x24;
*p++ = toUChar(am->Aam.IR.imm & 0xFF);
return p;
}
- if (/* (am->Aam.IR.reg == hregAMD64_RSP()
+ if (/* (sameHReg(am->Aam.IR.reg, hregAMD64_RSP())
|| wait for test case for RSP case */
- am->Aam.IR.reg == hregAMD64_R12()) {
+ sameHReg(am->Aam.IR.reg, hregAMD64_R12())) {
*p++ = mkModRegRM(2, iregBits210(greg), 4);
*p++ = 0x24;
p = emit32(p, am->Aam.IR.imm);
}
if (am->tag == Aam_IRRS) {
if (fits8bits(am->Aam.IRRS.imm)
- && am->Aam.IRRS.index != hregAMD64_RSP()) {
+ && ! sameHReg(am->Aam.IRRS.index, hregAMD64_RSP())) {
*p++ = mkModRegRM(1, iregBits210(greg), 4);
*p++ = mkSIB(am->Aam.IRRS.shift, iregBits210(am->Aam.IRRS.index),
iregBits210(am->Aam.IRRS.base));
*p++ = toUChar(am->Aam.IRRS.imm & 0xFF);
return p;
}
- if (am->Aam.IRRS.index != hregAMD64_RSP()) {
+ if (! sameHReg(am->Aam.IRRS.index, hregAMD64_RSP())) {
*p++ = mkModRegRM(2, iregBits210(greg), 4);
*p++ = mkSIB(am->Aam.IRRS.shift, iregBits210(am->Aam.IRRS.index),
iregBits210(am->Aam.IRRS.base));
}
switch (i->Ain.Alu64R.src->tag) {
case Armi_Imm:
- if (i->Ain.Alu64R.dst == hregAMD64_RAX()
+ if (sameHReg(i->Ain.Alu64R.dst, hregAMD64_RAX())
&& !fits8bits(i->Ain.Alu64R.src->Armi.Imm.imm32)) {
goto bad; /* FIXME: awaiting test case */
*p++ = toUChar(opc_imma);
}
switch (i->Ain.Alu32R.src->tag) {
case Armi_Imm:
- if (i->Ain.Alu32R.dst == hregAMD64_RAX()
+ if (sameHReg(i->Ain.Alu32R.dst, hregAMD64_RAX())
&& !fits8bits(i->Ain.Alu32R.src->Armi.Imm.imm32)) {
goto bad; /* FIXME: awaiting test case */
*p++ = toUChar(opc_imma);
{
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
*vrHI = env->vregmapHI[tmp];
}
return
toBool( hregClass(am->Aam.IR.reg) == HRcInt64
&& (hregIsVirtual(am->Aam.IR.reg)
- || am->Aam.IR.reg == hregAMD64_RBP()) );
+ || sameHReg(am->Aam.IR.reg, hregAMD64_RBP())) );
case Aam_IRRS:
return
toBool( hregClass(am->Aam.IRRS.base) == HRcInt64
{
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
*vrHI = env->vregmapHI[tmp];
}
/* Move the args to their final destinations. */
for (i = 0; i < nextArgReg; i++) {
- if (tmpregs[i] == INVALID_HREG) { // Skip invalid regs
+ if (hregIsInvalid(tmpregs[i])) { // Skip invalid regs
addInstr(env, ARMInstr_Imm32( argregs[i], 0xAA ));
continue;
}
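INVALID_HREG doubles as a sentinel in this marshalling code: the first pass leaves tmpregs[i] set to INVALID_HREG for argument slots it never filled, and this second pass detects those slots with hregIsInvalid (here loading the junk value 0xAA into the unused argument register; the other backends simply skip the slot). A minimal sketch of the sentinel pattern, with a hypothetical slot count and an abstract "move" step:

HReg tmpregs[4];                       /* hypothetical number of arg regs */
for (i = 0; i < 4; i++)
   tmpregs[i] = INVALID_HREG;          /* nothing assigned to this slot yet */
/* ... first pass: fill in tmpregs[i] only for the arguments that exist ... */
for (i = 0; i < 4; i++) {
   if (hregIsInvalid(tmpregs[i]))
      continue;                        /* slot was never used; skip it */
   /* otherwise emit a move of tmpregs[i] into argregs[i] here */
}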
return
toBool( hregClass(am->ARMam1.RI.reg) == HRcInt32
&& (hregIsVirtual(am->ARMam1.RI.reg)
- || am->ARMam1.RI.reg == hregARM_R8())
+ || sameHReg(am->ARMam1.RI.reg, hregARM_R8()))
&& am->ARMam1.RI.simm13 >= -4095
&& am->ARMam1.RI.simm13 <= 4095 );
case ARMam1_RRS:
HRegUsage reg_usage;
(*getRegUsage)(&reg_usage, instr, mode64);
for (i = 0; i < reg_usage.n_used; i++)
- if (reg_usage.hreg[i] == r)
+ if (sameHReg(reg_usage.hreg[i], r))
return True;
return False;
}
stack pointer register, or some other register beyond our
control, in which case we should just ignore it. */
for (k = 0; k < n_available_real_regs; k++)
- if (available_real_regs[k] == rreg)
+ if (sameHReg(available_real_regs[k], rreg))
break;
if (k == n_available_real_regs)
continue; /* not found -- ignore. */
/* rreg is involved in a HLR. Record this info in the array, if
there is space. */
for (k = 0; k < n_rregs; k++)
- if (rreg_state[k].rreg == rreg)
+ if (sameHReg(rreg_state[k].rreg, rreg))
break;
vassert(k < n_rregs); /* else rreg was not found in rreg_state?! */
rreg_state[k].has_hlrs = True;
/* find the state entry for this rreg */
for (k = 0; k < n_rregs; k++)
- if (rreg_state[k].rreg == rreg_lrs_la[j].rreg)
+ if (sameHReg(rreg_state[k].rreg, rreg_lrs_la[j].rreg))
break;
/* and assert that this rreg is marked as unavailable */
if (rreg_state[j].disp != Unavail)
continue;
for (k = 0; k < rreg_lrs_used; k++)
- if (rreg_lrs_la[k].rreg == rreg_state[j].rreg
+ if (sameHReg(rreg_lrs_la[k].rreg, rreg_state[j].rreg)
&& rreg_lrs_la[k].live_after < ii
&& ii < rreg_lrs_la[k].dead_before)
break;
# endif
/* Find the state entry for vregS. */
for (m = 0; m < n_rregs; m++)
- if (rreg_state[m].disp == Bound && rreg_state[m].vreg == vregS)
+ if (rreg_state[m].disp == Bound
+ && sameHReg(rreg_state[m].vreg, vregS))
break;
if (m == n_rregs)
/* We failed to find a binding for vregS, which means it's
vex_printf("\n\n");
# endif
for (k = 0; k < n_rregs; k++)
- if (rreg_state[k].rreg == rreg_lrs_la[rreg_lrs_la_next].rreg)
+ if (sameHReg(rreg_state[k].rreg, rreg_lrs_la[rreg_lrs_la_next].rreg))
break;
/* If this fails, we don't have an entry for this rreg.
Which we should. */
/* ok, it is spilled. Now, is this its last use? */
vassert(vreg_lrs[m].dead_before >= ii+1);
if (vreg_lrs[m].dead_before == ii+1
- && cand == INVALID_HREG) {
+ && hregIsInvalid(cand)) {
spilloff = vreg_lrs[m].spill_offset;
cand = vreg;
}
}
}
- if (nreads == 1 && cand != INVALID_HREG) {
+ if (nreads == 1 && ! hregIsInvalid(cand)) {
HInstr* reloaded;
if (reg_usage.n_used == 2)
- vassert(reg_usage.hreg[0] != reg_usage.hreg[1]);
+ vassert(! sameHReg(reg_usage.hreg[0], reg_usage.hreg[1]));
reloaded = directReload ( instrs_in->arr[ii], cand, spilloff );
if (debug_direct_reload && !reloaded) {
continue;
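This block is the direct-reload optimisation: if an instruction reads exactly one vreg that currently lives only in its spill slot, and that read is the vreg's last use (dead_before == ii+1), the separate reload instruction can be dropped and the spill slot folded straight into the consumer via directReload, as the x86 cases further down illustrate (Alu32R, Push, CMov32 and Test32 rewritten to take a memory operand at spill_off off %ebp). A stand-alone toy model of the idea, using entirely hypothetical types rather than the real HInstr/HReg machinery:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for an instruction with one register source. */
typedef struct { const char *op; bool src_spilled; unsigned spill_off; } ToyInsn;

/* Try to fold the spill slot into the instruction itself, as directReload
   does for selected instruction forms; return false if no pattern applies
   and a separate reload would have to be emitted instead. */
static bool toy_direct_reload(ToyInsn *i, unsigned spill_off)
{
   if (!i->src_spilled)
      return false;
   i->src_spilled = false;
   i->spill_off   = spill_off;         /* operand now comes from memory */
   return true;
}

int main(void)
{
   ToyInsn add = { "addl", true, 0 };
   if (toy_direct_reload(&add, 0x30))
      printf("%s 0x%x(%%ebp), %%eax    /* reload folded into the insn */\n",
             add.op, add.spill_off);
   else
      printf("movl 0x30(%%ebp), %%ecx ; %s %%ecx, %%eax\n", add.op);
   return 0;
}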
rreg_state[k].is_spill_cand = True;
for (m = 0; m < reg_usage.n_used; m++) {
- if (rreg_state[k].vreg == reg_usage.hreg[m]) {
+ if (sameHReg(rreg_state[k].vreg, reg_usage.hreg[m])) {
rreg_state[k].is_spill_cand = False;
break;
}
vassert(hregClass(rreg_state[spillee].rreg) == hregClass(vreg));
/* check we're not ejecting the vreg for which we are trying
to free up a register. */
- vassert(rreg_state[spillee].vreg != vreg);
+ vassert(! sameHReg(rreg_state[spillee].vreg, vreg));
m = hregNumber(rreg_state[spillee].vreg);
vassert(IS_VALID_VREGNO(m));
/* rreg_lrs_db[rreg_lrs_db_next].rreg is exiting a hard live
range. Mark it as such in the main rreg_state array. */
for (k = 0; k < n_rregs; k++)
- if (rreg_state[k].rreg == rreg_lrs_db[rreg_lrs_db_next].rreg)
+ if (sameHReg(rreg_state[k].rreg, rreg_lrs_db[rreg_lrs_db_next].rreg))
break;
/* If this vassertion fails, we don't have an entry for
this rreg. Which we should. */
/* Paranoia */
for (j = 0; j < n_rregs; j++)
- vassert(rreg_state[j].rreg == available_real_regs[j]);
+ vassert(sameHReg(rreg_state[j].rreg, available_real_regs[j]));
vassert(rreg_lrs_la_next == rreg_lrs_used);
vassert(rreg_lrs_db_next == rreg_lrs_used);
Int i;
/* Find it ... */
for (i = 0; i < tab->n_used; i++)
- if (tab->hreg[i] == reg)
+ if (sameHReg(tab->hreg[i], reg))
break;
if (i == tab->n_used) {
/* Not found, add new entry. */
{
Int i;
for (i = 0; i < map->n_used; i++)
- if (map->orig[i] == orig)
+ if (sameHReg(map->orig[i], orig))
vpanic("addToHRegMap: duplicate entry");
if (!hregIsVirtual(orig))
vpanic("addToHRegMap: orig is not a vreg");
if (!hregIsVirtual(orig))
return orig;
for (i = 0; i < map->n_used; i++)
- if (map->orig[i] == orig)
+ if (sameHReg(map->orig[i], orig))
return map->replacement[i];
vpanic("lookupHRegRemap: not found");
}
int32 int64 float32 float64 simd64 simd128
*/
-typedef UInt HReg;
+typedef
+ struct {
+ UInt reg;
+ }
+ HReg;
/* When extending this, do not use any value > 14 or < 0. */
/* HRegClass describes host register classes which the instruction
occupy 24 bits. */
if (r24 != regno)
vpanic("mkHReg: regno exceeds 2^24");
- return regno | (((UInt)rc) << 28) | (virtual ? (1<<24) : 0);
+ HReg r;
+ r.reg = regno | (((UInt)rc) << 28) | (virtual ? (1<<24) : 0);
+ return r;
}
static inline HRegClass hregClass ( HReg r ) {
- UInt rc = r;
+ UInt rc = r.reg;
rc = (rc >> 28) & 0x0F;
vassert(rc >= HRcInt32 && rc <= HRcVec128);
return (HRegClass)rc;
}
static inline UInt hregNumber ( HReg r ) {
- return ((UInt)r) & 0x00FFFFFF;
+ return r.reg & 0x00FFFFFF;
}
static inline Bool hregIsVirtual ( HReg r ) {
- return toBool(((UInt)r) & (1<<24));
+ return toBool(r.reg & (1<<24));
}
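Taken together, mkHReg and these accessors define the packing: register number in bits 0..23, the 'virtual' flag in bit 24, and the class in bits 28..31. A small round-trip check of that encoding; a sketch only, assuming the surrounding VEX definitions are in scope, with plain assert standing in for vassert:

#include <assert.h>

void check_hreg_encoding ( void )
{
   HReg v = mkHReg(5, HRcInt64, True);    /* virtual Int64 register #5 */
   assert(hregNumber(v) == 5);            /* bits 0..23 */
   assert(hregIsVirtual(v));              /* bit 24 */
   assert(hregClass(v) == HRcInt64);      /* bits 28..31 */

   HReg r = mkHReg(5, HRcInt64, False);   /* same number, but a real register */
   assert(!hregIsVirtual(r));
   assert(!sameHReg(v, r));               /* differ only in bit 24 (sameHReg: below) */
}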
+static inline Bool sameHReg ( HReg r1, HReg r2 )
+{
+ return toBool(r1.reg == r2.reg);
+}
+static const HReg INVALID_HREG = { 0xFFFFFFFF };
-
-#define INVALID_HREG ((HReg)0xFFFFFFFF)
-
+static inline Bool hregIsInvalid ( HReg r )
+{
+ return sameHReg(r, INVALID_HREG);
+}
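Because HReg is now a one-field struct, C's == and != no longer apply to it, so any register comparison not converted to these helpers becomes a compile-time error; sameHReg and hregIsInvalid are the replacement idioms used throughout the rest of this patch. A short usage sketch, again assuming the definitions above are in scope:

void check_hreg_helpers ( void )
{
   HReg a = mkHReg(3, HRcInt32, True);
   HReg b = mkHReg(3, HRcInt32, True);

   /* 'a == b' no longer compiles; equality goes through the helper. */
   vassert(sameHReg(a, b));

   HReg none = INVALID_HREG;
   vassert(hregIsInvalid(none));
   vassert(! hregIsInvalid(a));
}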
/*---------------------------------------------------------*/
/*--- Recording register usage (for reg-alloc) ---*/
{
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
*vrHI = env->vregmapHI[tmp];
}
vassert(env->mode64);
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
*vrHI = env->vregmapHI[tmp];
}
}
/* Move the args to their final destinations. */
for (i = 0; i < argreg; i++) {
- if (tmpregs[i] == INVALID_HREG) // Skip invalid regs
+ if (hregIsInvalid(tmpregs[i])) // Skip invalid regs
continue;
/* None of these insns, including any spill code that might
be generated, may alter the condition codes. */
/* special-case "mr" */
if (i->Pin.Alu.op == Palu_OR && // or Rd,Rs,Rs == mr Rd,Rs
rh_srcR->tag == Prh_Reg &&
- rh_srcR->Prh.Reg.reg == r_srcL) {
+ sameHReg(rh_srcR->Prh.Reg.reg, r_srcL)) {
vex_printf("mr ");
ppHRegPPC(i->Pin.Alu.dst);
vex_printf(",");
return;
case Pin_AvBinary:
if (i->Pin.AvBinary.op == Pav_XOR
- && i->Pin.AvBinary.dst == i->Pin.AvBinary.srcL
- && i->Pin.AvBinary.dst == i->Pin.AvBinary.srcR) {
+ && sameHReg(i->Pin.AvBinary.dst, i->Pin.AvBinary.srcL)
+ && sameHReg(i->Pin.AvBinary.dst, i->Pin.AvBinary.srcR)) {
/* reg-alloc needs to understand 'xor r,r,r' as a write of r */
/* (as opposed to a rite of passage :-) */
addHRegUse(u, HRmWrite, i->Pin.AvBinary.dst);
return False;
if (i->Pin.Alu.srcR->tag != Prh_Reg)
return False;
- if (i->Pin.Alu.srcR->Prh.Reg.reg != i->Pin.Alu.srcL)
+ if (! sameHReg(i->Pin.Alu.srcR->Prh.Reg.reg, i->Pin.Alu.srcL))
return False;
*src = i->Pin.Alu.srcL;
*dst = i->Pin.Alu.dst;
{
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
*vrLO = env->vregmapLo[tmp];
*vrHI = env->vregmapMedLo[tmp];
}
vassert(!env->mode64);
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
*vrHi = env->vregmapHi[tmp];
*vrMedHi = env->vregmapMedHi[tmp];
*vrMedLo = env->vregmapMedLo[tmp];
/* Move the args to their final destinations. */
for (i = 0; i < argreg; i++) {
- if (tmpregs[i] == INVALID_HREG) // Skip invalid regs
+ if (hregIsInvalid(tmpregs[i])) // Skip invalid regs
continue;
/* None of these insns, including any spill code that might
be generated, may alter the condition codes. */
for (i = 1; i <= 5; ++i) {
addHRegUse(u, HRmWrite, mkHReg(i, HRcInt64, False));
}
- if (insn->variant.helper_call.dst != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.helper_call.dst))
addHRegUse(u, HRmWrite, insn->variant.helper_call.dst);
/* Ditto for floating point registers. f0 - f7 are volatile */
case S390_INSN_BFP_CONVERT:
addHRegUse(u, HRmWrite, insn->variant.bfp_convert.dst_hi);
- if (insn->variant.bfp_convert.dst_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.bfp_convert.dst_lo))
addHRegUse(u, HRmWrite, insn->variant.bfp_convert.dst_lo);
addHRegUse(u, HRmRead, insn->variant.bfp_convert.op_hi);
- if (insn->variant.bfp_convert.op_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.bfp_convert.op_lo))
addHRegUse(u, HRmRead, insn->variant.bfp_convert.op_lo);
break;
case S390_INSN_DFP_CONVERT:
addHRegUse(u, HRmWrite, insn->variant.dfp_convert.dst_hi);
- if (insn->variant.dfp_convert.dst_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.dfp_convert.dst_lo))
addHRegUse(u, HRmWrite, insn->variant.dfp_convert.dst_lo);
addHRegUse(u, HRmRead, insn->variant.dfp_convert.op_hi); /* operand */
- if (insn->variant.dfp_convert.op_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.dfp_convert.op_lo))
addHRegUse(u, HRmRead, insn->variant.dfp_convert.op_lo); /* operand */
break;
As for the arguments of the helper call -- they will be loaded into
non-virtual registers. Again, we don't need to do anything for those
here. */
- if (insn->variant.helper_call.dst != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.helper_call.dst))
insn->variant.helper_call.dst = lookupHRegRemap(m, insn->variant.helper_call.dst);
break;
case S390_INSN_BFP_CONVERT:
insn->variant.bfp_convert.dst_hi =
lookupHRegRemap(m, insn->variant.bfp_convert.dst_hi);
- if (insn->variant.bfp_convert.dst_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.bfp_convert.dst_lo))
insn->variant.bfp_convert.dst_lo =
lookupHRegRemap(m, insn->variant.bfp_convert.dst_lo);
insn->variant.bfp_convert.op_hi =
lookupHRegRemap(m, insn->variant.bfp_convert.op_hi);
- if (insn->variant.bfp_convert.op_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.bfp_convert.op_lo))
insn->variant.bfp_convert.op_lo =
lookupHRegRemap(m, insn->variant.bfp_convert.op_lo);
break;
case S390_INSN_DFP_CONVERT:
insn->variant.dfp_convert.dst_hi =
lookupHRegRemap(m, insn->variant.dfp_convert.dst_hi);
- if (insn->variant.dfp_convert.dst_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.dfp_convert.dst_lo))
insn->variant.dfp_convert.dst_lo =
lookupHRegRemap(m, insn->variant.dfp_convert.dst_lo);
insn->variant.dfp_convert.op_hi =
lookupHRegRemap(m, insn->variant.dfp_convert.op_hi);
- if (insn->variant.dfp_convert.op_lo != INVALID_HREG)
+ if (! hregIsInvalid(insn->variant.dfp_convert.op_lo))
insn->variant.dfp_convert.op_lo =
lookupHRegRemap(m, insn->variant.dfp_convert.op_lo);
break;
if (size == 16) {
/* From smaller size to 16 bytes */
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
- vassert(op_lo == INVALID_HREG);
+ vassert(hregIsInvalid(op_lo));
} else {
/* From 16 bytes to smaller size */
vassert(is_valid_fp128_regpair(op_hi, op_lo));
- vassert(dst_lo == INVALID_HREG);
+ vassert(hregIsInvalid(dst_lo));
}
insn->tag = S390_INSN_BFP_CONVERT;
if (size == 16) {
/* From smaller size to 16 bytes */
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
- vassert(op_lo == INVALID_HREG);
+ vassert(hregIsInvalid(op_lo));
} else {
/* From 16 bytes to smaller size */
vassert(is_valid_fp128_regpair(op_hi, op_lo));
- vassert(dst_lo == INVALID_HREG);
+ vassert(hregIsInvalid(dst_lo));
}
insn->tag = S390_INSN_DFP_CONVERT;
break;
case S390_INSN_HELPER_CALL: {
- if (insn->variant.helper_call.dst != INVALID_HREG) {
+ if (! hregIsInvalid(insn->variant.helper_call.dst)) {
s390_sprintf(buf, "%M if (%C) %R = %s{%I}(%L)", "v-call",
insn->variant.helper_call.cond,
insn->variant.helper_call.dst,
target = insn->variant.helper_call.target;
if (cond != S390_CC_ALWAYS
- && insn->variant.helper_call.dst != INVALID_HREG) {
+ && ! hregIsInvalid(insn->variant.helper_call.dst)) {
/* The call might not happen (it isn't unconditional) and it
returns a result. In this case we will need to generate a
control flow diamond to put 0x555..555 in the return
buf = s390_emit_BASR(buf, S390_REGNO_LINK_REGISTER, 1); // call helper
/* Move the return value to the destination register */
- if (insn->variant.helper_call.dst != INVALID_HREG) {
+ if (! hregIsInvalid(insn->variant.helper_call.dst)) {
buf = s390_emit_LGR(buf, hregNumber(insn->variant.helper_call.dst),
S390_REGNO_RETURN_VALUE);
}
lookupIRTemp(ISelEnv *env, IRTemp tmp)
{
vassert(tmp < env->n_vregmap);
- vassert(env->vregmap[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmap[tmp]));
return env->vregmap[tmp];
}
lookupIRTemp128(HReg *hi, HReg *lo, ISelEnv *env, IRTemp tmp)
{
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*lo = env->vregmap[tmp];
*hi = env->vregmapHI[tmp];
return;
case Xin_SseReRg:
if (i->Xin.SseReRg.op == Xsse_XOR
- && i->Xin.SseReRg.src == i->Xin.SseReRg.dst) {
+ && sameHReg(i->Xin.SseReRg.src, i->Xin.SseReRg.dst)) {
/* reg-alloc needs to understand 'xor r,r' as a write of r */
/* (as opposed to a rite of passage :-) */
addHRegUse(u, HRmWrite, i->Xin.SseReRg.dst);
&& (i->Xin.Alu32R.op == Xalu_MOV || i->Xin.Alu32R.op == Xalu_OR
|| i->Xin.Alu32R.op == Xalu_XOR)
&& i->Xin.Alu32R.src->tag == Xrmi_Reg
- && i->Xin.Alu32R.src->Xrmi.Reg.reg == vreg) {
- vassert(i->Xin.Alu32R.dst != vreg);
+ && sameHReg(i->Xin.Alu32R.src->Xrmi.Reg.reg, vreg)) {
+ vassert(! sameHReg(i->Xin.Alu32R.dst, vreg));
return X86Instr_Alu32R(
i->Xin.Alu32R.op,
X86RMI_Mem( X86AMode_IR( spill_off, hregX86_EBP())),
if (i->tag == Xin_Alu32R
&& (i->Xin.Alu32R.op == Xalu_CMP)
&& i->Xin.Alu32R.src->tag == Xrmi_Imm
- && i->Xin.Alu32R.dst == vreg) {
+ && sameHReg(i->Xin.Alu32R.dst, vreg)) {
return X86Instr_Alu32M(
i->Xin.Alu32R.op,
X86RI_Imm( i->Xin.Alu32R.src->Xrmi.Imm.imm32 ),
*/
if (i->tag == Xin_Push
&& i->Xin.Push.src->tag == Xrmi_Reg
- && i->Xin.Push.src->Xrmi.Reg.reg == vreg) {
+ && sameHReg(i->Xin.Push.src->Xrmi.Reg.reg, vreg)) {
return X86Instr_Push(
X86RMI_Mem( X86AMode_IR( spill_off, hregX86_EBP()))
);
Convert to CMov32(RM_Mem, dst) */
if (i->tag == Xin_CMov32
&& i->Xin.CMov32.src->tag == Xrm_Reg
- && i->Xin.CMov32.src->Xrm.Reg.reg == vreg) {
- vassert(i->Xin.CMov32.dst != vreg);
+ && sameHReg(i->Xin.CMov32.src->Xrm.Reg.reg, vreg)) {
+ vassert(! sameHReg(i->Xin.CMov32.dst, vreg));
return X86Instr_CMov32(
i->Xin.CMov32.cond,
X86RM_Mem( X86AMode_IR( spill_off, hregX86_EBP() )),
/* Deal with form: Test32(imm,RM_Reg vreg) -> Test32(imm,amode) */
if (i->tag == Xin_Test32
&& i->Xin.Test32.dst->tag == Xrm_Reg
- && i->Xin.Test32.dst->Xrm.Reg.reg == vreg) {
+ && sameHReg(i->Xin.Test32.dst->Xrm.Reg.reg, vreg)) {
return X86Instr_Test32(
i->Xin.Test32.imm32,
X86RM_Mem( X86AMode_IR( spill_off, hregX86_EBP() ) )
{
if (am->tag == Xam_IR) {
if (am->Xam.IR.imm == 0
- && am->Xam.IR.reg != hregX86_ESP()
- && am->Xam.IR.reg != hregX86_EBP() ) {
+ && ! sameHReg(am->Xam.IR.reg, hregX86_ESP())
+ && ! sameHReg(am->Xam.IR.reg, hregX86_EBP()) ) {
*p++ = mkModRegRM(0, iregNo(greg), iregNo(am->Xam.IR.reg));
return p;
}
if (fits8bits(am->Xam.IR.imm)
- && am->Xam.IR.reg != hregX86_ESP()) {
+ && ! sameHReg(am->Xam.IR.reg, hregX86_ESP())) {
*p++ = mkModRegRM(1, iregNo(greg), iregNo(am->Xam.IR.reg));
*p++ = toUChar(am->Xam.IR.imm & 0xFF);
return p;
}
- if (am->Xam.IR.reg != hregX86_ESP()) {
+ if (! sameHReg(am->Xam.IR.reg, hregX86_ESP())) {
*p++ = mkModRegRM(2, iregNo(greg), iregNo(am->Xam.IR.reg));
p = emit32(p, am->Xam.IR.imm);
return p;
}
- if (am->Xam.IR.reg == hregX86_ESP()
+ if (sameHReg(am->Xam.IR.reg, hregX86_ESP())
&& fits8bits(am->Xam.IR.imm)) {
*p++ = mkModRegRM(1, iregNo(greg), 4);
*p++ = 0x24;
}
if (am->tag == Xam_IRRS) {
if (fits8bits(am->Xam.IRRS.imm)
- && am->Xam.IRRS.index != hregX86_ESP()) {
+ && ! sameHReg(am->Xam.IRRS.index, hregX86_ESP())) {
*p++ = mkModRegRM(1, iregNo(greg), 4);
*p++ = mkSIB(am->Xam.IRRS.shift, iregNo(am->Xam.IRRS.index),
iregNo(am->Xam.IRRS.base));
*p++ = toUChar(am->Xam.IRRS.imm & 0xFF);
return p;
}
- if (am->Xam.IRRS.index != hregX86_ESP()) {
+ if (! sameHReg(am->Xam.IRRS.index, hregX86_ESP())) {
*p++ = mkModRegRM(2, iregNo(greg), 4);
*p++ = mkSIB(am->Xam.IRRS.shift, iregNo(am->Xam.IRRS.index),
iregNo(am->Xam.IRRS.base));
}
switch (i->Xin.Alu32R.src->tag) {
case Xrmi_Imm:
- if (i->Xin.Alu32R.dst == hregX86_EAX()
+ if (sameHReg(i->Xin.Alu32R.dst, hregX86_EAX())
&& !fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
*p++ = toUChar(opc_imma);
p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
addRegUsage_X86AMode(&u, i->Xin.Store.dst);
for (j = 0; j < u.n_used; j++) {
HReg r = u.hreg[j];
- if (r == eax) a_ok = False;
- if (r == ebx) b_ok = False;
- if (r == ecx) c_ok = False;
- if (r == edx) d_ok = False;
+ if (sameHReg(r, eax)) a_ok = False;
+ if (sameHReg(r, ebx)) b_ok = False;
+ if (sameHReg(r, ecx)) c_ok = False;
+ if (sameHReg(r, edx)) d_ok = False;
}
if (a_ok) swap = eax;
if (b_ok) swap = ebx;
if (c_ok) swap = ecx;
if (d_ok) swap = edx;
- vassert(swap != INVALID_HREG);
+ vassert(! hregIsInvalid(swap));
/* xchgl %source, %swap. Could do better if swap is %eax. */
*p++ = 0x87;
p = doAMode_R(p, i->Xin.Store.src, swap);
{
vassert(tmp >= 0);
vassert(tmp < env->n_vregmap);
- vassert(env->vregmapHI[tmp] != INVALID_HREG);
+ vassert(! hregIsInvalid(env->vregmapHI[tmp]));
*vrLO = env->vregmap[tmp];
*vrHI = env->vregmapHI[tmp];
}
return
toBool( hregClass(am->Xam.IR.reg) == HRcInt32
&& (hregIsVirtual(am->Xam.IR.reg)
- || am->Xam.IR.reg == hregX86_EBP()) );
+ || sameHReg(am->Xam.IR.reg, hregX86_EBP())) );
case Xam_IRRS:
return
toBool( hregClass(am->Xam.IRRS.base) == HRcInt32