/* fall through */
}
+ /* ------------------- v7 barrier insns ------------------- */
+ switch (insn) {
+ case 0xF57FF06F: /* ISB */
+ stmt( IRStmt_MBE(Imbe_Fence) );
+ DIP("ISB\n");
+ return True;
+ case 0xF57FF04F: /* DSB */
+ stmt( IRStmt_MBE(Imbe_Fence) );
+ DIP("DSB\n");
+ return True;
+ case 0xF57FF05F: /* DMB */
+ stmt( IRStmt_MBE(Imbe_Fence) );
+ DIP("DMB\n");
+ return True;
+ default:
+ break;
+ }
+
return False;
# undef INSN_COND
/* fall through */
}
+ /* Handle various kinds of barriers. This is rather indiscriminate
+ in the sense that they are all turned into an IR Fence, which
+ means we don't know which they are, so the back end has to
+ re-emit them all when it comes acrosss an IR Fence.
+ */
+ switch (insn) {
+ case 0xEE070F9A: /* v6 */
+ /* mcr 15, 0, r0, c7, c10, 4 (v6) equiv to DSB (v7). Data
+ Synch Barrier -- ensures completion of memory accesses. */
+ stmt( IRStmt_MBE(Imbe_Fence) );
+ DIP("mcr 15, 0, r0, c7, c10, 4 (data synch barrier)\n");
+ goto decode_success;
+ case 0xEE070FBA: /* v6 */
+ /* mcr 15, 0, r0, c7, c10, 5 (v6) equiv to DMB (v7). Data
+ Memory Barrier -- ensures ordering of memory accesses. */
+ stmt( IRStmt_MBE(Imbe_Fence) );
+ DIP("mcr 15, 0, r0, c7, c10, 5 (data memory barrier)\n");
+ goto decode_success;
+ case 0xEE070F95: /* v6 */
+ /* mcr 15, 0, r0, c7, c5, 4 (v6) equiv to ISB (v7).
+ Instruction Synchronisation Barrier (or Flush Prefetch
+ Buffer) -- a pipe flush, I think. I suspect we could
+ ignore those, but to be on the safe side emit a fence
+ anyway. */
+ stmt( IRStmt_MBE(Imbe_Fence) );
+ DIP("mcr 15, 0, r0, c7, c5, 4 (insn synch barrier)\n");
+ goto decode_success;
+ default:
+ break;
+ }
+
/* ----------------------------------------------------------- */
/* -- Undecodable -- */
/* ----------------------------------------------------------- */
i->ARMin.FPSCR.iReg = iReg;
return i;
}
+/* Construct an ARMin_MFence instruction (memory fence).  It carries
+   no operands; the emitter later expands it into the three-insn
+   barrier sequence (DSB; DMB; ISB via their v6 MCR encodings). */
+ARMInstr* ARMInstr_MFence ( void ) {
+ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ i->tag = ARMin_MFence;
+ return i;
+}
void ppARMInstr ( ARMInstr* i ) {
switch (i->tag) {
vex_printf(", fpscr");
}
return;
+ case ARMin_MFence:
+ vex_printf("mfence (mcr 15,0,r0,c7,c10,4; 15,0,r0,c7,c10,5; "
+ "15,0,r0,c7,c5,4)");
+ return;
+
unhandled:
vex_printf("ppARMInstr: unhandled case (tag %d)", (Int)i->tag);
vpanic("ppARMInstr(1)");
else
addHRegUse(u, HRmWrite, i->ARMin.FPSCR.iReg);
return;
+ case ARMin_MFence:
+ return;
unhandled:
default:
ppARMInstr(i);
case ARMin_FPSCR:
i->ARMin.FPSCR.iReg = lookupHRegRemap(m, i->ARMin.FPSCR.iReg);
return;
+ case ARMin_MFence:
+ return;
unhandled:
default:
ppARMInstr(i);
}
goto bad; // FPSCR -> iReg case currently ATC
}
+ case ARMin_MFence: {
+ *p++ = 0xEE070F9A; /* mcr 15,0,r0,c7,c10,4 (DSB) */
+ *p++ = 0xEE070FBA; /* mcr 15,0,r0,c7,c10,5 (DMB) */
+ *p++ = 0xEE070F95; /* mcr 15,0,r0,c7,c5,4 (ISB) */
+ goto done;
+ }
default:
goto bad;
}
ARMin_VXferD,
ARMin_VXferS,
ARMin_VCvtID,
- ARMin_FPSCR
+ ARMin_FPSCR,
+ ARMin_MFence
}
ARMInstrTag;
Bool toFPSCR;
HReg iReg;
} FPSCR;
+ /* Mem fence. An insn which fences all loads and stores as
+ much as possible before continuing. On ARM we emit the
+ sequence
+ mcr 15,0,r0,c7,c10,4 (DSB)
+ mcr 15,0,r0,c7,c10,5 (DMB)
+ mcr 15,0,r0,c7,c5,4 (ISB)
+ which is probably total overkill, but better safe than
+ sorry.
+ */
+ struct {
+ } MFence;
+
} ARMin;
}
ARMInstr;
extern ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
HReg dst, HReg src );
extern ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg );
+extern ARMInstr* ARMInstr_MFence ( void );
extern void ppARMInstr ( ARMInstr* );
break;
}
+ /* --------- MEM FENCE --------- */
+ case Ist_MBE:
+ switch (stmt->Ist.MBE.event) {
+ case Imbe_Fence:
+ addInstr(env,ARMInstr_MFence());
+ return;
+ default:
+ break;
+ }
+ break;
+
/* --------- INSTR MARK --------- */
/* Doesn't generate any executable code ... */
case Ist_IMark: