return True;
}
- //fail:
+ /* ------------------- CLREX ------------------ */
+ /* 31 23 15 11 7
+ 1101 0101 0000 0011 0011 m 0101 1111 CLREX CRm
+      CRm is ignored by the hardware.
+ */
+ if ((INSN(31,0) & 0xFFFFF0FF) == 0xD503305F) {
+ UInt mm = INSN(11,8);
+      /* This cancels any reservation made by a preceding
+         Load-Exclusive (LDXR and friends).  Hand it through to
+         the back end as a bus event. */
+ stmt( IRStmt_MBE(Imbe_CancelReservation) );
+ DIP("clrex #%u\n", mm);
+ return True;
+ }
+
vex_printf("ARM64 front end: branch_etc\n");
return False;
# undef INSN
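
/* Aside: a minimal standalone sketch (not part of the patch) checking the
   decode mask used above.  CLREX's base pattern is 0xD503305F with CRm in
   bits 11..8, so masking with 0xFFFFF0FF must accept all sixteen
   encodings, CLREX #0 .. CLREX #15. */
#include <assert.h>
#include <stdint.h>

int main ( void )
{
   for (uint32_t crm = 0; crm <= 15; crm++) {
      uint32_t insn = 0xD503305Fu | (crm << 8);   /* CLREX #crm */
      assert((insn & 0xFFFFF0FFu) == 0xD503305Fu);
   }
   return 0;
}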
i->tag = ARM64in_MFence;
return i;
}
+ARM64Instr* ARM64Instr_ClrEX ( void ) {
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+ i->tag = ARM64in_ClrEX;
+ return i;
+}
ARM64Instr* ARM64Instr_VLdStH ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStH;
case ARM64in_MFence:
vex_printf("(mfence) dsb sy; dmb sy; isb");
return;
+ case ARM64in_ClrEX:
+ vex_printf("clrex #15");
+ return;
case ARM64in_VLdStH:
if (i->ARM64in.VLdStH.isLoad) {
vex_printf("ldr ");
return;
case ARM64in_MFence:
return;
+ case ARM64in_ClrEX:
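+         /* CLREX reads and writes no registers. */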
+ return;
case ARM64in_VLdStH:
addHRegUse(u, HRmRead, i->ARM64in.VLdStH.rN);
if (i->ARM64in.VLdStH.isLoad) {
return;
case ARM64in_MFence:
return;
+ case ARM64in_ClrEX:
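+         /* No register fields to remap. */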
+ return;
case ARM64in_VLdStH:
i->ARM64in.VLdStH.hD = lookupHRegRemap(m, i->ARM64in.VLdStH.hD);
i->ARM64in.VLdStH.rN = lookupHRegRemap(m, i->ARM64in.VLdStH.rN);
*p++ = 0xD5033FDF; /* ISB */
goto done;
}
- //case ARM64in_CLREX: {
- // //ATC, but believed to be correct
- // goto bad;
- // *p++ = 0xD5033F5F; /* clrex */
- // goto done;
- //}
+ case ARM64in_ClrEX: {
+ *p++ = 0xD5033F5F; /* clrex #15 */
+ goto done;
+ }
case ARM64in_VLdStH: {
/* 01 111101 01 imm12 n t LDR Ht, [Xn|SP, #imm12 * 2]
01 111101 00 imm12 n t STR Ht, [Xn|SP, #imm12 * 2]
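
/* Aside: a hypothetical sanity check (not part of the patch; assumes a
   GCC/Clang toolchain on an AArch64 host).  Executing the raw word via
   the GNU assembler's .inst directive behaves identically to the
   mnemonic form, confirming the constant emitted above. */
static void run_emitted_clrex_word ( void )
{
   __asm__ volatile(".inst 0xD5033F5F");   /* same encoding as "clrex #15" */
}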
ARM64in_LdrEX,
ARM64in_StrEX,
ARM64in_MFence,
+ ARM64in_ClrEX,
/* ARM64in_V*: scalar ops involving vector registers */
ARM64in_VLdStH, /* ld/st to/from low 16 bits of vec reg, imm offset */
ARM64in_VLdStS, /* ld/st to/from low 32 bits of vec reg, imm offset */
total nuclear overkill, but better safe than sorry. */
struct {
} MFence;
+ /* A CLREX instruction. */
+ struct {
+ } ClrEX;
/* --- INSTRUCTIONS INVOLVING VECTOR REGISTERS --- */
/* ld/st to/from low 16 bits of vec reg, imm offset */
struct {
extern ARM64Instr* ARM64Instr_LdrEX ( Int szB );
extern ARM64Instr* ARM64Instr_StrEX ( Int szB );
extern ARM64Instr* ARM64Instr_MFence ( void );
+extern ARM64Instr* ARM64Instr_ClrEX ( void );
extern ARM64Instr* ARM64Instr_VLdStH ( Bool isLoad, HReg sD, HReg rN,
UInt uimm12 /* 0 .. 8190, 0 % 2 */ );
extern ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN,
case Imbe_Fence:
addInstr(env, ARM64Instr_MFence());
return;
+ case Imbe_CancelReservation:
+ addInstr(env, ARM64Instr_ClrEX());
+ return;
default:
break;
}
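
/* Aside: a hypothetical illustration (not part of the patch) of how guest
   code reaches this path.  Without LSE atomics, AArch64 compilers lower a
   compare-and-swap like the one below to an LDXR/STXR loop and may emit
   CLREX on the compare-failure path to drop the reservation; under Valgrind
   that CLREX decodes to IRStmt_MBE(Imbe_CancelReservation), which the case
   above turns back into ARM64Instr_ClrEX. */
#include <stdbool.h>
#include <stdint.h>

static bool cas64 ( uint64_t* p, uint64_t expect, uint64_t desired )
{
   /* GCC/Clang builtin; a strong compare-exchange. */
   return __atomic_compare_exchange_n(p, &expect, desired,
                                      false /* strong */,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}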