The function LibVEX_Alloc was moved out of line to main_util.c because it caused linker problems with ICC.
See comments in BZ #339542.
This change re-enables inlining of that function by adding it
(renamed as LibVEX_Alloc_inline) to main_util.h.
500+ callsites changed accordingly.
git-svn-id: svn://svn.valgrind.org/vex/trunk@3103
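
For context: the point of this change is that a function defined out of line in main_util.c cannot be inlined at call sites in other translation units, whereas a 'static inline' definition in main_util.h can be. A minimal self-contained sketch of that technique follows; the names, the bump-pointer arena, and the slow path here are hypothetical simplifications, not the actual main_util.h code:

    /* sketch.h -- hypothetical illustration of a header-defined
       bump allocator; the real LibVEX_Alloc_inline differs. */
    #include <stddef.h>

    extern unsigned char *arena_curr, *arena_end;  /* assumed arena bounds */
    extern void* arena_alloc_slow(size_t nbytes);  /* assumed refill path  */

    static inline void* alloc_inline(size_t nbytes)
    {
       /* Round the request up to 8-byte alignment, then bump the
          arena pointer; fall back out of line only when the arena
          is exhausted.  Because this definition lives in the header,
          the compiler can inline the fast path at every call site. */
       nbytes = (nbytes + 7) & ~(size_t)7;
       if ((size_t)(arena_end - arena_curr) >= nbytes) {
          void* res = arena_curr;
          arena_curr += nbytes;
          return res;
       }
       return arena_alloc_slow(nbytes);
    }

Each `- LibVEX_Alloc(...)` / `+ LibVEX_Alloc_inline(...)` pair below is the mechanical rename at one such call site.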
{
#if 0
*nregs = 6;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
(*arr)[ 0] = hregAMD64_RSI();
(*arr)[ 1] = hregAMD64_RDI();
(*arr)[ 2] = hregAMD64_RBX();
#endif
#if 1
*nregs = 20;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
(*arr)[ 0] = hregAMD64_RSI();
(*arr)[ 1] = hregAMD64_RDI();
(*arr)[ 2] = hregAMD64_R8();
/* --------- AMD64AMode: memory address expressions. --------- */
AMD64AMode* AMD64AMode_IR ( UInt imm32, HReg reg ) {
- AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
+ AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
am->tag = Aam_IR;
am->Aam.IR.imm = imm32;
am->Aam.IR.reg = reg;
return am;
}
AMD64AMode* AMD64AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
- AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
+ AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
am->tag = Aam_IRRS;
am->Aam.IRRS.imm = imm32;
am->Aam.IRRS.base = base;
/* --------- Operand, which can be reg, immediate or memory. --------- */
AMD64RMI* AMD64RMI_Imm ( UInt imm32 ) {
- AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+ AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
op->tag = Armi_Imm;
op->Armi.Imm.imm32 = imm32;
return op;
}
AMD64RMI* AMD64RMI_Reg ( HReg reg ) {
- AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+ AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
op->tag = Armi_Reg;
op->Armi.Reg.reg = reg;
return op;
}
AMD64RMI* AMD64RMI_Mem ( AMD64AMode* am ) {
- AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+ AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
op->tag = Armi_Mem;
op->Armi.Mem.am = am;
return op;
/* --------- Operand, which can be reg or immediate only. --------- */
AMD64RI* AMD64RI_Imm ( UInt imm32 ) {
- AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
+ AMD64RI* op = LibVEX_Alloc_inline(sizeof(AMD64RI));
op->tag = Ari_Imm;
op->Ari.Imm.imm32 = imm32;
return op;
}
AMD64RI* AMD64RI_Reg ( HReg reg ) {
- AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
+ AMD64RI* op = LibVEX_Alloc_inline(sizeof(AMD64RI));
op->tag = Ari_Reg;
op->Ari.Reg.reg = reg;
return op;
/* --------- Operand, which can be reg or memory only. --------- */
AMD64RM* AMD64RM_Reg ( HReg reg ) {
- AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
+ AMD64RM* op = LibVEX_Alloc_inline(sizeof(AMD64RM));
op->tag = Arm_Reg;
op->Arm.Reg.reg = reg;
return op;
}
AMD64RM* AMD64RM_Mem ( AMD64AMode* am ) {
- AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
+ AMD64RM* op = LibVEX_Alloc_inline(sizeof(AMD64RM));
op->tag = Arm_Mem;
op->Arm.Mem.am = am;
return op;
}
AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Imm64;
i->Ain.Imm64.imm64 = imm64;
i->Ain.Imm64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Alu64R;
i->Ain.Alu64R.op = op;
i->Ain.Alu64R.src = src;
return i;
}
AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Alu64M;
i->Ain.Alu64M.op = op;
i->Ain.Alu64M.src = src;
return i;
}
AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sh64;
i->Ain.Sh64.op = op;
i->Ain.Sh64.src = src;
return i;
}
AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Test64;
i->Ain.Test64.imm32 = imm32;
i->Ain.Test64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Unary64;
i->Ain.Unary64.op = op;
i->Ain.Unary64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Lea64 ( AMD64AMode* am, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Lea64;
i->Ain.Lea64.am = am;
i->Ain.Lea64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Alu32R;
i->Ain.Alu32R.op = op;
i->Ain.Alu32R.src = src;
return i;
}
AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_MulL;
i->Ain.MulL.syned = syned;
i->Ain.MulL.src = src;
return i;
}
AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Div;
i->Ain.Div.syned = syned;
i->Ain.Div.sz = sz;
return i;
}
AMD64Instr* AMD64Instr_Push( AMD64RMI* src ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Push;
i->Ain.Push.src = src;
return i;
}
AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms,
RetLoc rloc ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Call;
i->Ain.Call.cond = cond;
i->Ain.Call.target = target;
AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
AMD64CondCode cond, Bool toFastEP ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_XDirect;
i->Ain.XDirect.dstGA = dstGA;
i->Ain.XDirect.amRIP = amRIP;
}
AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
AMD64CondCode cond ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_XIndir;
i->Ain.XIndir.dstGA = dstGA;
i->Ain.XIndir.amRIP = amRIP;
}
AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
AMD64CondCode cond, IRJumpKind jk ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_XAssisted;
i->Ain.XAssisted.dstGA = dstGA;
i->Ain.XAssisted.amRIP = amRIP;
}
AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_CMov64;
i->Ain.CMov64.cond = cond;
i->Ain.CMov64.src = src;
}
AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
AMD64AMode* addr, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_CLoad;
i->Ain.CLoad.cond = cond;
i->Ain.CLoad.szB = szB;
}
AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
HReg src, AMD64AMode* addr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_CStore;
i->Ain.CStore.cond = cond;
i->Ain.CStore.szB = szB;
return i;
}
AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_MovxLQ;
i->Ain.MovxLQ.syned = syned;
i->Ain.MovxLQ.src = src;
}
AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
AMD64AMode* src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_LoadEX;
i->Ain.LoadEX.szSmall = szSmall;
i->Ain.LoadEX.syned = syned;
return i;
}
AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Store;
i->Ain.Store.sz = sz;
i->Ain.Store.src = src;
return i;
}
AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Set64;
i->Ain.Set64.cond = cond;
i->Ain.Set64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Bsfr64;
i->Ain.Bsfr64.isFwds = isFwds;
i->Ain.Bsfr64.src = src;
return i;
}
AMD64Instr* AMD64Instr_MFence ( void ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_MFence;
return i;
}
AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_ACAS;
i->Ain.ACAS.addr = addr;
i->Ain.ACAS.sz = sz;
return i;
}
AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_DACAS;
i->Ain.DACAS.addr = addr;
i->Ain.DACAS.sz = sz;
AMD64Instr* AMD64Instr_A87Free ( Int nregs )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87Free;
i->Ain.A87Free.nregs = nregs;
vassert(nregs >= 1 && nregs <= 7);
}
AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87PushPop;
i->Ain.A87PushPop.addr = addr;
i->Ain.A87PushPop.isPush = isPush;
}
AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87FpOp;
i->Ain.A87FpOp.op = op;
return i;
}
AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87LdCW;
i->Ain.A87LdCW.addr = addr;
return i;
}
AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87StSW;
i->Ain.A87StSW.addr = addr;
return i;
}
AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* addr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_LdMXCSR;
i->Ain.LdMXCSR.addr = addr;
return i;
}
AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseUComIS;
i->Ain.SseUComIS.sz = toUChar(sz);
i->Ain.SseUComIS.srcL = srcL;
return i;
}
AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseSI2SF;
i->Ain.SseSI2SF.szS = toUChar(szS);
i->Ain.SseSI2SF.szD = toUChar(szD);
return i;
}
AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseSF2SI;
i->Ain.SseSF2SI.szS = toUChar(szS);
i->Ain.SseSF2SI.szD = toUChar(szD);
}
AMD64Instr* AMD64Instr_SseSDSS ( Bool from64, HReg src, HReg dst )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseSDSS;
i->Ain.SseSDSS.from64 = from64;
i->Ain.SseSDSS.src = src;
}
AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz,
HReg reg, AMD64AMode* addr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseLdSt;
i->Ain.SseLdSt.isLoad = isLoad;
i->Ain.SseLdSt.sz = toUChar(sz);
}
AMD64Instr* AMD64Instr_SseLdzLO ( Int sz, HReg reg, AMD64AMode* addr )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseLdzLO;
i->Ain.SseLdzLO.sz = sz;
i->Ain.SseLdzLO.reg = reg;
return i;
}
AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse32Fx4;
i->Ain.Sse32Fx4.op = op;
i->Ain.Sse32Fx4.src = src;
return i;
}
AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse32FLo;
i->Ain.Sse32FLo.op = op;
i->Ain.Sse32FLo.src = src;
return i;
}
AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse64Fx2;
i->Ain.Sse64Fx2.op = op;
i->Ain.Sse64Fx2.src = src;
return i;
}
AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse64FLo;
i->Ain.Sse64FLo.op = op;
i->Ain.Sse64FLo.src = src;
return i;
}
AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseReRg;
i->Ain.SseReRg.op = op;
i->Ain.SseReRg.src = re;
return i;
}
AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode cond, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseCMov;
i->Ain.SseCMov.cond = cond;
i->Ain.SseCMov.src = src;
return i;
}
AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseShuf;
i->Ain.SseShuf.order = order;
i->Ain.SseShuf.src = src;
}
//uu AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad,
//uu HReg reg, AMD64AMode* addr ) {
-//uu AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+//uu AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
//uu i->tag = Ain_AvxLdSt;
//uu i->Ain.AvxLdSt.isLoad = isLoad;
//uu i->Ain.AvxLdSt.reg = reg;
//uu return i;
//uu }
//uu AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp op, HReg re, HReg rg ) {
-//uu AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+//uu AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
//uu i->tag = Ain_AvxReRg;
//uu i->Ain.AvxReRg.op = op;
//uu i->Ain.AvxReRg.src = re;
//uu }
AMD64Instr* AMD64Instr_EvCheck ( AMD64AMode* amCounter,
AMD64AMode* amFailAddr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_EvCheck;
i->Ain.EvCheck.amCounter = amCounter;
i->Ain.EvCheck.amFailAddr = amFailAddr;
return i;
}
AMD64Instr* AMD64Instr_ProfInc ( void ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_ProfInc;
return i;
}
vassert(archinfo_host->endness == VexEndnessLE);
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->chainingAllowed = chainingAllowed;
{
Int i = 0;
*nregs = 26;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
// callee saves ones (22 to 28) are listed first, since we prefer
// them if they're available
/* --------- Memory address expressions (amodes). --------- */
ARM64AMode* ARM64AMode_RI9 ( HReg reg, Int simm9 ) {
- ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+ ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
am->tag = ARM64am_RI9;
am->ARM64am.RI9.reg = reg;
am->ARM64am.RI9.simm9 = simm9;
}
ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB ) {
- ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+ ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
am->tag = ARM64am_RI12;
am->ARM64am.RI12.reg = reg;
am->ARM64am.RI12.uimm12 = uimm12;
}
ARM64AMode* ARM64AMode_RR ( HReg base, HReg index ) {
- ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+ ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
am->tag = ARM64am_RR;
am->ARM64am.RR.base = base;
am->ARM64am.RR.index = index;
/* --------- Reg or uimm12<<{0,12} operands --------- */
ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
- ARM64RIA* riA = LibVEX_Alloc(sizeof(ARM64RIA));
+ ARM64RIA* riA = LibVEX_Alloc_inline(sizeof(ARM64RIA));
riA->tag = ARM64riA_I12;
riA->ARM64riA.I12.imm12 = imm12;
riA->ARM64riA.I12.shift = shift;
return riA;
}
ARM64RIA* ARM64RIA_R ( HReg reg ) {
- ARM64RIA* riA = LibVEX_Alloc(sizeof(ARM64RIA));
+ ARM64RIA* riA = LibVEX_Alloc_inline(sizeof(ARM64RIA));
riA->tag = ARM64riA_R;
riA->ARM64riA.R.reg = reg;
return riA;
/* --------- Reg or "bitfield" (logic immediate) operands --------- */
ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS ) {
- ARM64RIL* riL = LibVEX_Alloc(sizeof(ARM64RIL));
+ ARM64RIL* riL = LibVEX_Alloc_inline(sizeof(ARM64RIL));
riL->tag = ARM64riL_I13;
riL->ARM64riL.I13.bitN = bitN;
riL->ARM64riL.I13.immR = immR;
return riL;
}
ARM64RIL* ARM64RIL_R ( HReg reg ) {
- ARM64RIL* riL = LibVEX_Alloc(sizeof(ARM64RIL));
+ ARM64RIL* riL = LibVEX_Alloc_inline(sizeof(ARM64RIL));
riL->tag = ARM64riL_R;
riL->ARM64riL.R.reg = reg;
return riL;
/* --------------- Reg or uimm6 operands --------------- */
ARM64RI6* ARM64RI6_I6 ( UInt imm6 ) {
- ARM64RI6* ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
+ ARM64RI6* ri6 = LibVEX_Alloc_inline(sizeof(ARM64RI6));
ri6->tag = ARM64ri6_I6;
ri6->ARM64ri6.I6.imm6 = imm6;
vassert(imm6 > 0 && imm6 < 64);
return ri6;
}
ARM64RI6* ARM64RI6_R ( HReg reg ) {
- ARM64RI6* ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
+ ARM64RI6* ri6 = LibVEX_Alloc_inline(sizeof(ARM64RI6));
ri6->tag = ARM64ri6_R;
ri6->ARM64ri6.R.reg = reg;
return ri6;
ARM64Instr* ARM64Instr_Arith ( HReg dst,
HReg argL, ARM64RIA* argR, Bool isAdd ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Arith;
i->ARM64in.Arith.dst = dst;
i->ARM64in.Arith.argL = argL;
return i;
}
ARM64Instr* ARM64Instr_Cmp ( HReg argL, ARM64RIA* argR, Bool is64 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Cmp;
i->ARM64in.Cmp.argL = argL;
i->ARM64in.Cmp.argR = argR;
}
ARM64Instr* ARM64Instr_Logic ( HReg dst,
HReg argL, ARM64RIL* argR, ARM64LogicOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Logic;
i->ARM64in.Logic.dst = dst;
i->ARM64in.Logic.argL = argL;
return i;
}
ARM64Instr* ARM64Instr_Test ( HReg argL, ARM64RIL* argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Test;
i->ARM64in.Test.argL = argL;
i->ARM64in.Test.argR = argR;
}
ARM64Instr* ARM64Instr_Shift ( HReg dst,
HReg argL, ARM64RI6* argR, ARM64ShiftOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Shift;
i->ARM64in.Shift.dst = dst;
i->ARM64in.Shift.argL = argL;
return i;
}
ARM64Instr* ARM64Instr_Unary ( HReg dst, HReg src, ARM64UnaryOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Unary;
i->ARM64in.Unary.dst = dst;
i->ARM64in.Unary.src = src;
return i;
}
ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_MovI;
i->ARM64in.MovI.dst = dst;
i->ARM64in.MovI.src = src;
return i;
}
ARM64Instr* ARM64Instr_Imm64 ( HReg dst, ULong imm64 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Imm64;
i->ARM64in.Imm64.dst = dst;
i->ARM64in.Imm64.imm64 = imm64;
return i;
}
ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt64;
i->ARM64in.LdSt64.isLoad = isLoad;
i->ARM64in.LdSt64.rD = rD;
return i;
}
ARM64Instr* ARM64Instr_LdSt32 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt32;
i->ARM64in.LdSt32.isLoad = isLoad;
i->ARM64in.LdSt32.rD = rD;
return i;
}
ARM64Instr* ARM64Instr_LdSt16 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt16;
i->ARM64in.LdSt16.isLoad = isLoad;
i->ARM64in.LdSt16.rD = rD;
return i;
}
ARM64Instr* ARM64Instr_LdSt8 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt8;
i->ARM64in.LdSt8.isLoad = isLoad;
i->ARM64in.LdSt8.rD = rD;
}
ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
ARM64CondCode cond, Bool toFastEP ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_XDirect;
i->ARM64in.XDirect.dstGA = dstGA;
i->ARM64in.XDirect.amPC = amPC;
}
ARM64Instr* ARM64Instr_XIndir ( HReg dstGA, ARM64AMode* amPC,
ARM64CondCode cond ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_XIndir;
i->ARM64in.XIndir.dstGA = dstGA;
i->ARM64in.XIndir.amPC = amPC;
}
ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
ARM64CondCode cond, IRJumpKind jk ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_XAssisted;
i->ARM64in.XAssisted.dstGA = dstGA;
i->ARM64in.XAssisted.amPC = amPC;
}
ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
ARM64CondCode cond ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_CSel;
i->ARM64in.CSel.dst = dst;
i->ARM64in.CSel.argL = argL;
}
ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, Addr64 target, Int nArgRegs,
RetLoc rloc ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Call;
i->ARM64in.Call.cond = cond;
i->ARM64in.Call.target = target;
return i;
}
extern ARM64Instr* ARM64Instr_AddToSP ( Int simm ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_AddToSP;
i->ARM64in.AddToSP.simm = simm;
vassert(-4096 < simm && simm < 4096);
return i;
}
extern ARM64Instr* ARM64Instr_FromSP ( HReg dst ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_FromSP;
i->ARM64in.FromSP.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR,
ARM64MulOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Mul;
i->ARM64in.Mul.dst = dst;
i->ARM64in.Mul.argL = argL;
return i;
}
ARM64Instr* ARM64Instr_LdrEX ( Int szB ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdrEX;
i->ARM64in.LdrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARM64Instr* ARM64Instr_StrEX ( Int szB ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_StrEX;
i->ARM64in.StrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARM64Instr* ARM64Instr_MFence ( void ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_MFence;
return i;
}
ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStS;
i->ARM64in.VLdStS.isLoad = isLoad;
i->ARM64in.VLdStS.sD = sD;
return i;
}
ARM64Instr* ARM64Instr_VLdStD ( Bool isLoad, HReg dD, HReg rN, UInt uimm12 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStD;
i->ARM64in.VLdStD.isLoad = isLoad;
i->ARM64in.VLdStD.dD = dD;
return i;
}
ARM64Instr* ARM64Instr_VLdStQ ( Bool isLoad, HReg rQ, HReg rN ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStQ;
i->ARM64in.VLdStQ.isLoad = isLoad;
i->ARM64in.VLdStQ.rQ = rQ;
return i;
}
ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCvtI2F;
i->ARM64in.VCvtI2F.how = how;
i->ARM64in.VCvtI2F.rD = rD;
}
ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
UChar armRM ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCvtF2I;
i->ARM64in.VCvtF2I.how = how;
i->ARM64in.VCvtF2I.rD = rD;
return i;
}
ARM64Instr* ARM64Instr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCvtSD;
i->ARM64in.VCvtSD.sToD = sToD;
i->ARM64in.VCvtSD.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VUnaryD;
i->ARM64in.VUnaryD.op = op;
i->ARM64in.VUnaryD.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VUnaryS;
i->ARM64in.VUnaryS.op = op;
i->ARM64in.VUnaryS.dst = dst;
}
ARM64Instr* ARM64Instr_VBinD ( ARM64FpBinOp op,
HReg dst, HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VBinD;
i->ARM64in.VBinD.op = op;
i->ARM64in.VBinD.dst = dst;
}
ARM64Instr* ARM64Instr_VBinS ( ARM64FpBinOp op,
HReg dst, HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VBinS;
i->ARM64in.VBinS.op = op;
i->ARM64in.VBinS.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_VCmpD ( HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCmpD;
i->ARM64in.VCmpD.argL = argL;
i->ARM64in.VCmpD.argR = argR;
return i;
}
ARM64Instr* ARM64Instr_VCmpS ( HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCmpS;
i->ARM64in.VCmpS.argL = argL;
i->ARM64in.VCmpS.argR = argR;
}
ARM64Instr* ARM64Instr_VFCSel ( HReg dst, HReg argL, HReg argR,
ARM64CondCode cond, Bool isD ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VFCSel;
i->ARM64in.VFCSel.dst = dst;
i->ARM64in.VFCSel.argL = argL;
return i;
}
ARM64Instr* ARM64Instr_FPCR ( Bool toFPCR, HReg iReg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_FPCR;
i->ARM64in.FPCR.toFPCR = toFPCR;
i->ARM64in.FPCR.iReg = iReg;
return i;
}
ARM64Instr* ARM64Instr_FPSR ( Bool toFPSR, HReg iReg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_FPSR;
i->ARM64in.FPSR.toFPSR = toFPSR;
i->ARM64in.FPSR.iReg = iReg;
}
ARM64Instr* ARM64Instr_VBinV ( ARM64VecBinOp op,
HReg dst, HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VBinV;
i->ARM64in.VBinV.op = op;
i->ARM64in.VBinV.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp op, HReg mod, HReg arg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VModifyV;
i->ARM64in.VModifyV.op = op;
i->ARM64in.VModifyV.mod = mod;
return i;
}
ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg dst, HReg arg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VUnaryV;
i->ARM64in.VUnaryV.op = op;
i->ARM64in.VUnaryV.dst = dst;
}
ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op,
UInt dszBlg2, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VNarrowV;
i->ARM64in.VNarrowV.op = op;
i->ARM64in.VNarrowV.dszBlg2 = dszBlg2;
}
ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
HReg dst, HReg src, UInt amt ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VShiftImmV;
i->ARM64in.VShiftImmV.op = op;
i->ARM64in.VShiftImmV.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_VExtV ( HReg dst, HReg srcLo, HReg srcHi, UInt amtB ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VExtV;
i->ARM64in.VExtV.dst = dst;
i->ARM64in.VExtV.srcLo = srcLo;
return i;
}
ARM64Instr* ARM64Instr_VImmQ (HReg rQ, UShort imm) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VImmQ;
i->ARM64in.VImmQ.rQ = rQ;
i->ARM64in.VImmQ.imm = imm;
return i;
}
ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VDfromX;
i->ARM64in.VDfromX.rD = rD;
i->ARM64in.VDfromX.rX = rX;
return i;
}
ARM64Instr* ARM64Instr_VQfromX ( HReg rQ, HReg rXlo ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VQfromX;
i->ARM64in.VQfromX.rQ = rQ;
i->ARM64in.VQfromX.rXlo = rXlo;
return i;
}
ARM64Instr* ARM64Instr_VQfromXX ( HReg rQ, HReg rXhi, HReg rXlo ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VQfromXX;
i->ARM64in.VQfromXX.rQ = rQ;
i->ARM64in.VQfromXX.rXhi = rXhi;
return i;
}
ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VXfromQ;
i->ARM64in.VXfromQ.rX = rX;
i->ARM64in.VXfromQ.rQ = rQ;
return i;
}
ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VXfromDorS;
i->ARM64in.VXfromDorS.rX = rX;
i->ARM64in.VXfromDorS.rDorS = rDorS;
return i;
}
ARM64Instr* ARM64Instr_VMov ( UInt szB, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VMov;
i->ARM64in.VMov.szB = szB;
i->ARM64in.VMov.dst = dst;
}
ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
ARM64AMode* amFailAddr ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_EvCheck;
i->ARM64in.EvCheck.amCounter = amCounter;
i->ARM64in.EvCheck.amFailAddr = amFailAddr;
return i;
}
ARM64Instr* ARM64Instr_ProfInc ( void ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_ProfInc;
return i;
}
vassert(sizeof(ARM64Instr) <= 32);
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->chainingAllowed = chainingAllowed;
{
Int i = 0;
*nregs = 26;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
// callee saves ones are listed first, since we prefer them
// if they're available
(*arr)[i++] = hregARM_R4();
/* --------- Mem AModes: Addressing Mode 1 --------- */
ARMAMode1* ARMAMode1_RI ( HReg reg, Int simm13 ) {
- ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
+ ARMAMode1* am = LibVEX_Alloc_inline(sizeof(ARMAMode1));
am->tag = ARMam1_RI;
am->ARMam1.RI.reg = reg;
am->ARMam1.RI.simm13 = simm13;
return am;
}
ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift ) {
- ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
+ ARMAMode1* am = LibVEX_Alloc_inline(sizeof(ARMAMode1));
am->tag = ARMam1_RRS;
am->ARMam1.RRS.base = base;
am->ARMam1.RRS.index = index;
/* --------- Mem AModes: Addressing Mode 2 --------- */
ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
- ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
+ ARMAMode2* am = LibVEX_Alloc_inline(sizeof(ARMAMode2));
am->tag = ARMam2_RI;
am->ARMam2.RI.reg = reg;
am->ARMam2.RI.simm9 = simm9;
return am;
}
ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
- ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
+ ARMAMode2* am = LibVEX_Alloc_inline(sizeof(ARMAMode2));
am->tag = ARMam2_RR;
am->ARMam2.RR.base = base;
am->ARMam2.RR.index = index;
/* --------- Mem AModes: Addressing Mode VFP --------- */
ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
- ARMAModeV* am = LibVEX_Alloc(sizeof(ARMAModeV));
+ ARMAModeV* am = LibVEX_Alloc_inline(sizeof(ARMAModeV));
vassert(simm11 >= -1020 && simm11 <= 1020);
vassert(0 == (simm11 & 3));
am->reg = reg;
/* --------- Mem AModes: Addressing Mode Neon ------- */
ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
- ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
+ ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
am->tag = ARMamN_RR;
am->ARMamN.RR.rN = rN;
am->ARMamN.RR.rM = rM;
}
ARMAModeN *mkARMAModeN_R ( HReg rN ) {
- ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
+ ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
am->tag = ARMamN_R;
am->ARMamN.R.rN = rN;
return am;
}
ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
- ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
+ ARMRI84* ri84 = LibVEX_Alloc_inline(sizeof(ARMRI84));
ri84->tag = ARMri84_I84;
ri84->ARMri84.I84.imm8 = imm8;
ri84->ARMri84.I84.imm4 = imm4;
return ri84;
}
ARMRI84* ARMRI84_R ( HReg reg ) {
- ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
+ ARMRI84* ri84 = LibVEX_Alloc_inline(sizeof(ARMRI84));
ri84->tag = ARMri84_R;
ri84->ARMri84.R.reg = reg;
return ri84;
/* --------- Reg or imm5 operands --------- */
ARMRI5* ARMRI5_I5 ( UInt imm5 ) {
- ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
+ ARMRI5* ri5 = LibVEX_Alloc_inline(sizeof(ARMRI5));
ri5->tag = ARMri5_I5;
ri5->ARMri5.I5.imm5 = imm5;
vassert(imm5 > 0 && imm5 <= 31); // zero is not allowed
return ri5;
}
ARMRI5* ARMRI5_R ( HReg reg ) {
- ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
+ ARMRI5* ri5 = LibVEX_Alloc_inline(sizeof(ARMRI5));
ri5->tag = ARMri5_R;
ri5->ARMri5.R.reg = reg;
return ri5;
/* -------- Neon Immediate operand --------- */
ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
- ARMNImm* i = LibVEX_Alloc(sizeof(ARMNImm));
+ ARMNImm* i = LibVEX_Alloc_inline(sizeof(ARMNImm));
i->type = type;
i->imm8 = imm8;
return i;
ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
{
- ARMNRS *p = LibVEX_Alloc(sizeof(ARMNRS));
+ ARMNRS *p = LibVEX_Alloc_inline(sizeof(ARMNRS));
p->tag = tag;
p->reg = reg;
p->index = index;
ARMInstr* ARMInstr_Alu ( ARMAluOp op,
HReg dst, HReg argL, ARMRI84* argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Alu;
i->ARMin.Alu.op = op;
i->ARMin.Alu.dst = dst;
}
ARMInstr* ARMInstr_Shift ( ARMShiftOp op,
HReg dst, HReg argL, ARMRI5* argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Shift;
i->ARMin.Shift.op = op;
i->ARMin.Shift.dst = dst;
return i;
}
ARMInstr* ARMInstr_Unary ( ARMUnaryOp op, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Unary;
i->ARMin.Unary.op = op;
i->ARMin.Unary.dst = dst;
return i;
}
ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg argL, ARMRI84* argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_CmpOrTst;
i->ARMin.CmpOrTst.isCmp = isCmp;
i->ARMin.CmpOrTst.argL = argL;
return i;
}
ARMInstr* ARMInstr_Mov ( HReg dst, ARMRI84* src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Mov;
i->ARMin.Mov.dst = dst;
i->ARMin.Mov.src = src;
return i;
}
ARMInstr* ARMInstr_Imm32 ( HReg dst, UInt imm32 ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Imm32;
i->ARMin.Imm32.dst = dst;
i->ARMin.Imm32.imm32 = imm32;
}
ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
Bool isLoad, HReg rD, ARMAMode1* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdSt32;
i->ARMin.LdSt32.cc = cc;
i->ARMin.LdSt32.isLoad = isLoad;
ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
Bool isLoad, Bool signedLoad,
HReg rD, ARMAMode2* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdSt16;
i->ARMin.LdSt16.cc = cc;
i->ARMin.LdSt16.isLoad = isLoad;
}
ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
Bool isLoad, HReg rD, ARMAMode1* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdSt8U;
i->ARMin.LdSt8U.cc = cc;
i->ARMin.LdSt8U.isLoad = isLoad;
return i;
}
ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Ld8S;
i->ARMin.Ld8S.cc = cc;
i->ARMin.Ld8S.rD = rD;
}
ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
ARMCondCode cond, Bool toFastEP ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_XDirect;
i->ARMin.XDirect.dstGA = dstGA;
i->ARMin.XDirect.amR15T = amR15T;
}
ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_XIndir;
i->ARMin.XIndir.dstGA = dstGA;
i->ARMin.XIndir.amR15T = amR15T;
}
ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond, IRJumpKind jk ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_XAssisted;
i->ARMin.XAssisted.dstGA = dstGA;
i->ARMin.XAssisted.amR15T = amR15T;
return i;
}
ARMInstr* ARMInstr_CMov ( ARMCondCode cond, HReg dst, ARMRI84* src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_CMov;
i->ARMin.CMov.cond = cond;
i->ARMin.CMov.dst = dst;
}
ARMInstr* ARMInstr_Call ( ARMCondCode cond, Addr32 target, Int nArgRegs,
RetLoc rloc ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Call;
i->ARMin.Call.cond = cond;
i->ARMin.Call.target = target;
return i;
}
ARMInstr* ARMInstr_Mul ( ARMMulOp op ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Mul;
i->ARMin.Mul.op = op;
return i;
}
ARMInstr* ARMInstr_LdrEX ( Int szB ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdrEX;
i->ARMin.LdrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARMInstr* ARMInstr_StrEX ( Int szB ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_StrEX;
i->ARMin.StrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARMInstr* ARMInstr_VLdStD ( Bool isLoad, HReg dD, ARMAModeV* am ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VLdStD;
i->ARMin.VLdStD.isLoad = isLoad;
i->ARMin.VLdStD.dD = dD;
return i;
}
ARMInstr* ARMInstr_VLdStS ( Bool isLoad, HReg fD, ARMAModeV* am ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VLdStS;
i->ARMin.VLdStS.isLoad = isLoad;
i->ARMin.VLdStS.fD = fD;
return i;
}
ARMInstr* ARMInstr_VAluD ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VAluD;
i->ARMin.VAluD.op = op;
i->ARMin.VAluD.dst = dst;
return i;
}
ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VAluS;
i->ARMin.VAluS.op = op;
i->ARMin.VAluS.dst = dst;
return i;
}
ARMInstr* ARMInstr_VUnaryD ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VUnaryD;
i->ARMin.VUnaryD.op = op;
i->ARMin.VUnaryD.dst = dst;
return i;
}
ARMInstr* ARMInstr_VUnaryS ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VUnaryS;
i->ARMin.VUnaryS.op = op;
i->ARMin.VUnaryS.dst = dst;
return i;
}
ARMInstr* ARMInstr_VCmpD ( HReg argL, HReg argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCmpD;
i->ARMin.VCmpD.argL = argL;
i->ARMin.VCmpD.argR = argR;
return i;
}
ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCMovD;
i->ARMin.VCMovD.cond = cond;
i->ARMin.VCMovD.dst = dst;
return i;
}
ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCMovS;
i->ARMin.VCMovS.cond = cond;
i->ARMin.VCMovS.dst = dst;
return i;
}
ARMInstr* ARMInstr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCvtSD;
i->ARMin.VCvtSD.sToD = sToD;
i->ARMin.VCvtSD.dst = dst;
return i;
}
ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VXferD;
i->ARMin.VXferD.toD = toD;
i->ARMin.VXferD.dD = dD;
return i;
}
ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VXferS;
i->ARMin.VXferS.toS = toS;
i->ARMin.VXferS.fD = fD;
}
ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCvtID;
i->ARMin.VCvtID.iToD = iToD;
i->ARMin.VCvtID.syned = syned;
return i;
}
ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_FPSCR;
i->ARMin.FPSCR.toFPSCR = toFPSCR;
i->ARMin.FPSCR.iReg = iReg;
return i;
}
ARMInstr* ARMInstr_MFence ( void ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_MFence;
return i;
}
ARMInstr* ARMInstr_CLREX( void ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_CLREX;
return i;
}
ARMInstr* ARMInstr_NLdStQ ( Bool isLoad, HReg dQ, ARMAModeN *amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NLdStQ;
i->ARMin.NLdStQ.isLoad = isLoad;
i->ARMin.NLdStQ.dQ = dQ;
}
ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NLdStD;
i->ARMin.NLdStD.isLoad = isLoad;
i->ARMin.NLdStD.dD = dD;
ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
UInt size, Bool Q ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NUnary;
i->ARMin.NUnary.op = op;
i->ARMin.NUnary.src = nQ;
ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
UInt size, Bool Q ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NUnaryS;
i->ARMin.NUnaryS.op = op;
i->ARMin.NUnaryS.src = src;
ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
UInt size, Bool Q ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NDual;
i->ARMin.NDual.op = op;
i->ARMin.NDual.arg1 = nQ;
ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
HReg dst, HReg argL, HReg argR,
UInt size, Bool Q ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NBinary;
i->ARMin.NBinary.op = op;
i->ARMin.NBinary.argL = argL;
}
ARMInstr* ARMInstr_NeonImm (HReg dst, ARMNImm* imm ) {
- ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr *i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NeonImm;
i->ARMin.NeonImm.dst = dst;
i->ARMin.NeonImm.imm = imm;
}
ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NCMovQ;
i->ARMin.NCMovQ.cond = cond;
i->ARMin.NCMovQ.dst = dst;
ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
HReg dst, HReg argL, HReg argR,
UInt size, Bool Q ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NShift;
i->ARMin.NShift.op = op;
i->ARMin.NShift.argL = argL;
ARMInstr* ARMInstr_NShl64 ( HReg dst, HReg src, UInt amt )
{
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NShl64;
i->ARMin.NShl64.dst = dst;
i->ARMin.NShl64.src = src;
ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
UInt u8, u4;
- ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr *i = LibVEX_Alloc_inline(sizeof(ARMInstr));
/* Try to generate single ADD if possible */
if (fitsIn8x4(&u8, &u4, imm32)) {
i->tag = ARMin_Alu;
ARMInstr* ARMInstr_EvCheck ( ARMAMode1* amCounter,
ARMAMode1* amFailAddr ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_EvCheck;
i->ARMin.EvCheck.amCounter = amCounter;
i->ARMin.EvCheck.amFailAddr = amFailAddr;
}
ARMInstr* ARMInstr_ProfInc ( void ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_ProfInc;
return i;
}
arm_hwcaps = hwcaps_host; // JRS 2012 Mar 31: FIXME (RM)
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->chainingAllowed = chainingAllowed;
if (0)
vex_printf("ensureRRISpace: %d -> %d\n", *size, 2 * *size);
vassert(used == *size);
- arr2 = LibVEX_Alloc(2 * *size * sizeof(RRegLR));
+ arr2 = LibVEX_Alloc_inline(2 * *size * sizeof(RRegLR));
for (k = 0; k < *size; k++)
arr2[k] = (*info)[k];
*size *= 2;
/* If this is not so, vreg_state entries will overflow. */
vassert(n_vregs < 32767);
- rreg_state = LibVEX_Alloc(n_rregs * sizeof(RRegState));
- vreg_state = LibVEX_Alloc(n_vregs * sizeof(Short));
+ rreg_state = LibVEX_Alloc_inline(n_rregs * sizeof(RRegState));
+ vreg_state = LibVEX_Alloc_inline(n_vregs * sizeof(Short));
for (j = 0; j < n_rregs; j++) {
rreg_state[j].rreg = available_real_regs[j];
vreg_lrs = NULL;
if (n_vregs > 0)
- vreg_lrs = LibVEX_Alloc(sizeof(VRegLR) * n_vregs);
+ vreg_lrs = LibVEX_Alloc_inline(sizeof(VRegLR) * n_vregs);
for (j = 0; j < n_vregs; j++) {
vreg_lrs[j].live_after = INVALID_INSTRNO;
rreg_lrs_used = 0;
rreg_lrs_size = 4;
- rreg_lrs_la = LibVEX_Alloc(rreg_lrs_size * sizeof(RRegLR));
+ rreg_lrs_la = LibVEX_Alloc_inline(rreg_lrs_size * sizeof(RRegLR));
rreg_lrs_db = NULL; /* we'll create this later */
/* We'll need to track live range start/end points separately for
   each rreg. Sigh. */
vassert(n_available_real_regs > 0);
- rreg_live_after = LibVEX_Alloc(n_available_real_regs * sizeof(Int));
- rreg_dead_before = LibVEX_Alloc(n_available_real_regs * sizeof(Int));
+ rreg_live_after = LibVEX_Alloc_inline(n_available_real_regs * sizeof(Int));
+ rreg_dead_before = LibVEX_Alloc_inline(n_available_real_regs * sizeof(Int));
for (j = 0; j < n_available_real_regs; j++) {
rreg_live_after[j] =
/* Finally, copy the _la variant into the _db variant and
sort both by their respective fields. */
- rreg_lrs_db = LibVEX_Alloc(rreg_lrs_used * sizeof(RRegLR));
+ rreg_lrs_db = LibVEX_Alloc_inline(rreg_lrs_used * sizeof(RRegLR));
for (j = 0; j < rreg_lrs_used; j++)
rreg_lrs_db[j] = rreg_lrs_la[j];
HInstrArray* newHInstrArray ( void )
{
- HInstrArray* ha = LibVEX_Alloc(sizeof(HInstrArray));
+ HInstrArray* ha = LibVEX_Alloc_inline(sizeof(HInstrArray));
ha->arr_size = 4;
ha->arr_used = 0;
- ha->arr = LibVEX_Alloc(ha->arr_size * sizeof(HInstr*));
+ ha->arr = LibVEX_Alloc_inline(ha->arr_size * sizeof(HInstr*));
ha->n_vregs = 0;
return ha;
}
ha->arr_used++;
} else {
Int i;
- HInstr** arr2 = LibVEX_Alloc(ha->arr_size * 2 * sizeof(HInstr*));
+ HInstr** arr2 = LibVEX_Alloc_inline(ha->arr_size * 2 * sizeof(HInstr*));
for (i = 0; i < ha->arr_size; i++)
arr2[i] = ha->arr[i];
ha->arr_size *= 2;
else
*nregs = 28;
UInt i = 0;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
/* ZERO = constant 0
AT = assembler temporary
MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
{
- MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
+ MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode));
am->tag = Mam_IR;
am->Mam.IR.base = base;
am->Mam.IR.index = idx;
MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
{
- MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
+ MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode));
am->tag = Mam_RR;
am->Mam.RR.base = base;
am->Mam.RR.index = idx;
MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
{
- MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
+ MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH));
op->tag = Mrh_Imm;
op->Mrh.Imm.syned = syned;
op->Mrh.Imm.imm16 = imm16;
MIPSRH *MIPSRH_Reg(HReg reg)
{
- MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
+ MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH));
op->tag = Mrh_Reg;
op->Mrh.Reg.reg = reg;
return op;
MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_LI;
i->Min.LI.dst = dst;
i->Min.LI.imm = imm;
MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Alu;
i->Min.Alu.op = op;
i->Min.Alu.dst = dst;
MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
MIPSRH * srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Shft;
i->Min.Shft.op = op;
i->Min.Shft.sz32 = sz32;
MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Unary;
i->Min.Unary.op = op;
i->Min.Unary.dst = dst;
MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
MIPSCondCode cond)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Cmp;
i->Min.Cmp.syned = syned;
i->Min.Cmp.sz32 = sz32;
MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL,
HReg srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Mul;
i->Min.Mul.syned = syned;
i->Min.Mul.widening = wid; /* widen=True else False */
/* msub */
MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Macc;
i->Min.Macc.op = Macc_SUB;
/* madd */
MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Macc;
i->Min.Macc.op = Macc_ADD;
/* div */
MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Div;
i->Min.Div.syned = syned;
i->Min.Div.sz32 = sz32; /* True = 32 bits */
HReg src, RetLoc rloc )
{
UInt mask;
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Call;
i->Min.Call.cond = cond;
i->Min.Call.target = target;
UInt argiregs, RetLoc rloc )
{
UInt mask;
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Call;
i->Min.Call.cond = cond;
i->Min.Call.target = target;
MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
MIPSCondCode cond, Bool toFastEP ) {
- MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_XDirect;
i->Min.XDirect.dstGA = dstGA;
i->Min.XDirect.amPC = amPC;
MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
MIPSCondCode cond ) {
- MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_XIndir;
i->Min.XIndir.dstGA = dstGA;
i->Min.XIndir.amPC = amPC;
MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
MIPSCondCode cond, IRJumpKind jk ) {
- MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_XAssisted;
i->Min.XAssisted.dstGA = dstGA;
i->Min.XAssisted.amPC = amPC;
MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Load;
i->Min.Load.sz = sz;
i->Min.Load.src = src;
MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Store;
i->Min.Store.sz = sz;
i->Min.Store.src = src;
MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_LoadL;
i->Min.LoadL.sz = sz;
i->Min.LoadL.src = src;
MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr,
HReg expd, HReg data, Bool mode64)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Cas;
i->Min.Cas.sz = sz;
i->Min.Cas.old = old;
MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_StoreC;
i->Min.StoreC.sz = sz;
i->Min.StoreC.src = src;
MIPSInstr *MIPSInstr_Mthi(HReg src)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Mthi;
i->Min.MtHL.src = src;
return i;
MIPSInstr *MIPSInstr_Mtlo(HReg src)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Mtlo;
i->Min.MtHL.src = src;
return i;
MIPSInstr *MIPSInstr_Mfhi(HReg dst)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Mfhi;
i->Min.MfHL.dst = dst;
return i;
MIPSInstr *MIPSInstr_Mflo(HReg dst)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_Mflo;
i->Min.MfHL.dst = dst;
return i;
/* Read/Write Link Register */
MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_RdWrLR;
i->Min.RdWrLR.wrLR = wrLR;
i->Min.RdWrLR.gpr = gpr;
MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpLdSt;
i->Min.FpLdSt.isLoad = isLoad;
i->Min.FpLdSt.sz = sz;
MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpUnary;
i->Min.FpUnary.op = op;
i->Min.FpUnary.dst = dst;
MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpBinary;
i->Min.FpBinary.op = op;
i->Min.FpBinary.dst = dst;
MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2,
HReg src3 )
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpTernary;
i->Min.FpTernary.op = op;
i->Min.FpTernary.dst = dst;
MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpConvert;
i->Min.FpConvert.op = op;
i->Min.FpConvert.dst = dst;
MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpCompare;
i->Min.FpCompare.op = op;
i->Min.FpCompare.dst = dst;
MIPSInstr *MIPSInstr_MtFCSR(HReg src)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_MtFCSR;
i->Min.MtFCSR.src = src;
return i;
MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_MfFCSR;
i->Min.MfFCSR.dst = dst;
return i;
MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src )
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_FpGpMove;
i->Min.FpGpMove.op = op;
i->Min.FpGpMove.dst = dst;
MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src,
HReg cond )
{
- MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_MoveCond;
i->Min.MoveCond.op = op;
i->Min.MoveCond.dst = dst;
MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
MIPSAMode* amFailAddr ) {
- MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_EvCheck;
i->Min.EvCheck.amCounter = amCounter;
i->Min.EvCheck.amFailAddr = amFailAddr;
}
MIPSInstr* MIPSInstr_ProfInc ( void ) {
- MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
i->tag = Min_ProfInc;
return i;
}
#endif
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
env->mode64 = mode64;
env->fp_mode64 = fp_mode64;
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->hwcaps = hwcaps_host;
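/* Illustrative sketch, not part of the patch: how the vregmap built
   above is consulted later.  This mirrors the lookupIRTemp-style
   helper the instruction selectors use; the exact name is assumed. */
static HReg lookupIRTemp_sketch ( ISelEnv* env, IRTemp tmp )
{
   vassert(tmp < env->n_vregmap);
   return env->vregmap[tmp];   /* mapping is fixed; never reallocated */
}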
*nregs = (32-9) + (32-24) + (32-24);
else
*nregs = (32-7) + (32-24) + (32-24);
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
// GPR0 = scratch reg where possible - some ops interpret as value zero
// GPR1 = stack pointer
// GPR2 = TOC pointer
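/* Illustrative sketch, not part of the patch: given the reservations
   noted above, the allocatable array is filled starting at GPR3
   (constructor names assumed from the surrounding host code). */
UInt j = 0;
(*arr)[j++] = hregPPC_GPR3(mode64);
(*arr)[j++] = hregPPC_GPR4(mode64);
/* ... continuing through the remaining free GPRs, then FPRs/VRs ... */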
/* --------- PPCAMode: memory address expressions. --------- */
PPCAMode* PPCAMode_IR ( Int idx, HReg base ) {
- PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode));
+ PPCAMode* am = LibVEX_Alloc_inline(sizeof(PPCAMode));
vassert(idx >= -0x8000 && idx < 0x8000);
am->tag = Pam_IR;
am->Pam.IR.base = base;
return am;
}
PPCAMode* PPCAMode_RR ( HReg idx, HReg base ) {
- PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode));
+ PPCAMode* am = LibVEX_Alloc_inline(sizeof(PPCAMode));
am->tag = Pam_RR;
am->Pam.RR.base = base;
am->Pam.RR.index = idx;
/* --------- Operand, which can be a reg or a u16/s16. --------- */
PPCRH* PPCRH_Imm ( Bool syned, UShort imm16 ) {
- PPCRH* op = LibVEX_Alloc(sizeof(PPCRH));
+ PPCRH* op = LibVEX_Alloc_inline(sizeof(PPCRH));
op->tag = Prh_Imm;
op->Prh.Imm.syned = syned;
op->Prh.Imm.imm16 = imm16;
return op;
}
PPCRH* PPCRH_Reg ( HReg reg ) {
- PPCRH* op = LibVEX_Alloc(sizeof(PPCRH));
+ PPCRH* op = LibVEX_Alloc_inline(sizeof(PPCRH));
op->tag = Prh_Reg;
op->Prh.Reg.reg = reg;
return op;
/* --------- Operand, which can be a reg or a u32/64. --------- */
PPCRI* PPCRI_Imm ( ULong imm64 ) {
- PPCRI* op = LibVEX_Alloc(sizeof(PPCRI));
+ PPCRI* op = LibVEX_Alloc_inline(sizeof(PPCRI));
op->tag = Pri_Imm;
op->Pri.Imm = imm64;
return op;
}
PPCRI* PPCRI_Reg ( HReg reg ) {
- PPCRI* op = LibVEX_Alloc(sizeof(PPCRI));
+ PPCRI* op = LibVEX_Alloc_inline(sizeof(PPCRI));
op->tag = Pri_Reg;
op->Pri.Reg = reg;
return op;
/* --------- Operand, which can be a vector reg or a simm5. --------- */
PPCVI5s* PPCVI5s_Imm ( Char simm5 ) {
- PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s));
+ PPCVI5s* op = LibVEX_Alloc_inline(sizeof(PPCVI5s));
op->tag = Pvi_Imm;
op->Pvi.Imm5s = simm5;
vassert(simm5 >= -16 && simm5 <= 15);
return op;
}
PPCVI5s* PPCVI5s_Reg ( HReg reg ) {
- PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s));
+ PPCVI5s* op = LibVEX_Alloc_inline(sizeof(PPCVI5s));
op->tag = Pvi_Reg;
op->Pvi.Reg = reg;
vassert(hregClass(reg) == HRcVec128);
PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 )
{
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_LI;
i->Pin.LI.dst = dst;
i->Pin.LI.imm64 = imm64;
}
PPCInstr* PPCInstr_Alu ( PPCAluOp op, HReg dst,
HReg srcL, PPCRH* srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Alu;
i->Pin.Alu.op = op;
i->Pin.Alu.dst = dst;
}
PPCInstr* PPCInstr_Shft ( PPCShftOp op, Bool sz32,
HReg dst, HReg srcL, PPCRH* srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Shft;
i->Pin.Shft.op = op;
i->Pin.Shft.sz32 = sz32;
}
PPCInstr* PPCInstr_AddSubC ( Bool isAdd, Bool setC,
HReg dst, HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AddSubC;
i->Pin.AddSubC.isAdd = isAdd;
i->Pin.AddSubC.setC = setC;
}
PPCInstr* PPCInstr_Cmp ( Bool syned, Bool sz32,
UInt crfD, HReg srcL, PPCRH* srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Cmp;
i->Pin.Cmp.syned = syned;
i->Pin.Cmp.sz32 = sz32;
return i;
}
PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Unary;
i->Pin.Unary.op = op;
i->Pin.Unary.dst = dst;
}
PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi, Bool sz32,
HReg dst, HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_MulL;
i->Pin.MulL.syned = syned;
i->Pin.MulL.hi = hi;
}
PPCInstr* PPCInstr_Div ( Bool extended, Bool syned, Bool sz32,
HReg dst, HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Div;
i->Pin.Div.extended = extended;
i->Pin.Div.syned = syned;
PPCInstr* PPCInstr_Call ( PPCCondCode cond,
Addr64 target, UInt argiregs, RetLoc rloc ) {
UInt mask;
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Call;
i->Pin.Call.cond = cond;
i->Pin.Call.target = target;
}
PPCInstr* PPCInstr_XDirect ( Addr64 dstGA, PPCAMode* amCIA,
PPCCondCode cond, Bool toFastEP ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_XDirect;
i->Pin.XDirect.dstGA = dstGA;
i->Pin.XDirect.amCIA = amCIA;
}
PPCInstr* PPCInstr_XIndir ( HReg dstGA, PPCAMode* amCIA,
PPCCondCode cond ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_XIndir;
i->Pin.XIndir.dstGA = dstGA;
i->Pin.XIndir.amCIA = amCIA;
}
PPCInstr* PPCInstr_XAssisted ( HReg dstGA, PPCAMode* amCIA,
PPCCondCode cond, IRJumpKind jk ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_XAssisted;
i->Pin.XAssisted.dstGA = dstGA;
i->Pin.XAssisted.amCIA = amCIA;
}
PPCInstr* PPCInstr_CMov ( PPCCondCode cond,
HReg dst, PPCRI* src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_CMov;
i->Pin.CMov.cond = cond;
i->Pin.CMov.src = src;
}
PPCInstr* PPCInstr_Load ( UChar sz,
HReg dst, PPCAMode* src, Bool mode64 ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Load;
i->Pin.Load.sz = sz;
i->Pin.Load.src = src;
PPCInstr* PPCInstr_LoadL ( UChar sz,
HReg dst, HReg src, Bool mode64 )
{
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_LoadL;
i->Pin.LoadL.sz = sz;
i->Pin.LoadL.src = src;
}
PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src,
Bool mode64 ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Store;
i->Pin.Store.sz = sz;
i->Pin.Store.src = src;
return i;
}
PPCInstr* PPCInstr_StoreC ( UChar sz, HReg dst, HReg src, Bool mode64 ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_StoreC;
i->Pin.StoreC.sz = sz;
i->Pin.StoreC.src = src;
return i;
}
PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Set;
i->Pin.Set.cond = cond;
i->Pin.Set.dst = dst;
}
PPCInstr* PPCInstr_MfCR ( HReg dst )
{
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_MfCR;
i->Pin.MfCR.dst = dst;
return i;
}
PPCInstr* PPCInstr_MFence ( void )
{
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_MFence;
return i;
}
PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpUnary;
i->Pin.FpUnary.op = op;
i->Pin.FpUnary.dst = dst;
}
PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpBinary;
i->Pin.FpBinary.op = op;
i->Pin.FpBinary.dst = dst;
PPCInstr* PPCInstr_FpMulAcc ( PPCFpOp op, HReg dst, HReg srcML,
HReg srcMR, HReg srcAcc )
{
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpMulAcc;
i->Pin.FpMulAcc.op = op;
i->Pin.FpMulAcc.dst = dst;
}
PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz,
HReg reg, PPCAMode* addr ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpLdSt;
i->Pin.FpLdSt.isLoad = isLoad;
i->Pin.FpLdSt.sz = sz;
}
PPCInstr* PPCInstr_FpSTFIW ( HReg addr, HReg data )
{
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpSTFIW;
i->Pin.FpSTFIW.addr = addr;
i->Pin.FpSTFIW.data = data;
return i;
}
PPCInstr* PPCInstr_FpRSP ( HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpRSP;
i->Pin.FpRSP.dst = dst;
i->Pin.FpRSP.src = src;
return i;
}
PPCInstr* PPCInstr_Dfp64Unary(PPCFpOp op, HReg dst, HReg src) {
- PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) );
+ PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
i->tag = Pin_Dfp64Unary;
i->Pin.Dfp64Unary.op = op;
i->Pin.Dfp64Unary.dst = dst;
return i;
}
PPCInstr* PPCInstr_Dfp64Binary(PPCFpOp op, HReg dst, HReg srcL, HReg srcR) {
- PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) );
+ PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
i->tag = Pin_Dfp64Binary;
i->Pin.Dfp64Binary.op = op;
i->Pin.Dfp64Binary.dst = dst;
return i;
}
PPCInstr* PPCInstr_DfpShift ( PPCFpOp op, HReg dst, HReg src, PPCRI* shift ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpShift;
i->Pin.DfpShift.op = op;
i->Pin.DfpShift.shift = shift;
}
PPCInstr* PPCInstr_Dfp128Unary(PPCFpOp op, HReg dst_hi, HReg dst_lo,
HReg src_hi, HReg src_lo) {
- PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) );
+ PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
i->tag = Pin_Dfp128Unary;
i->Pin.Dfp128Unary.op = op;
i->Pin.Dfp128Unary.dst_hi = dst_hi;
PPCInstr* PPCInstr_Dfp128Binary(PPCFpOp op, HReg dst_hi, HReg dst_lo,
HReg srcR_hi, HReg srcR_lo) {
/* dst is used to pass the srcL argument and return the result */
- PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) );
+ PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
i->tag = Pin_Dfp128Binary;
i->Pin.Dfp128Binary.op = op;
i->Pin.Dfp128Binary.dst_hi = dst_hi;
PPCInstr* PPCInstr_DfpShift128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo,
HReg src_hi, HReg src_lo,
PPCRI* shift ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpShift128;
i->Pin.DfpShift128.op = op;
i->Pin.DfpShift128.shift = shift;
return i;
}
PPCInstr* PPCInstr_DfpRound ( HReg dst, HReg src, PPCRI* r_rmc ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpRound;
i->Pin.DfpRound.dst = dst;
i->Pin.DfpRound.src = src;
}
PPCInstr* PPCInstr_DfpRound128 ( HReg dst_hi, HReg dst_lo, HReg src_hi,
HReg src_lo, PPCRI* r_rmc ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpRound128;
i->Pin.DfpRound128.dst_hi = dst_hi;
i->Pin.DfpRound128.dst_lo = dst_lo;
}
PPCInstr* PPCInstr_DfpQuantize ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR,
PPCRI* rmc ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpQuantize;
i->Pin.DfpQuantize.op = op;
i->Pin.DfpQuantize.dst = dst;
PPCInstr* PPCInstr_DfpQuantize128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo,
HReg src_hi, HReg src_lo, PPCRI* rmc ) {
/* dst is used to pass the left operand in and return the result */
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpQuantize128;
i->Pin.DfpQuantize128.op = op;
i->Pin.DfpQuantize128.dst_hi = dst_hi;
}
PPCInstr* PPCInstr_DfpD128toD64 ( PPCFpOp op, HReg dst,
HReg src_hi, HReg src_lo ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpD128toD64;
i->Pin.DfpD128toD64.op = op;
i->Pin.DfpD128toD64.src_hi = src_hi;
}
PPCInstr* PPCInstr_DfpI64StoD128 ( PPCFpOp op, HReg dst_hi,
HReg dst_lo, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_DfpI64StoD128;
i->Pin.DfpI64StoD128.op = op;
i->Pin.DfpI64StoD128.src = src;
PPCInstr* PPCInstr_ExtractExpD128 ( PPCFpOp op, HReg dst,
HReg src_hi, HReg src_lo ) {
/* dst is used to pass the srcL argument */
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_ExtractExpD128;
i->Pin.ExtractExpD128.op = op;
i->Pin.ExtractExpD128.dst = dst;
PPCInstr* PPCInstr_InsertExpD128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo,
HReg srcL, HReg srcR_hi, HReg srcR_lo ) {
/* dst is used to pass the srcL argument */
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_InsertExpD128;
i->Pin.InsertExpD128.op = op;
i->Pin.InsertExpD128.dst_hi = dst_hi;
return i;
}
PPCInstr* PPCInstr_Dfp64Cmp (/* UInt crfD,*/ HReg dst, HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Dfp64Cmp;
i->Pin.Dfp64Cmp.dst = dst;
i->Pin.Dfp64Cmp.srcL = srcL;
}
PPCInstr* PPCInstr_Dfp128Cmp ( HReg dst, HReg srcL_hi, HReg srcL_lo,
HReg srcR_hi, HReg srcR_lo ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_Dfp128Cmp;
i->Pin.Dfp128Cmp.dst = dst;
i->Pin.Dfp128Cmp.srcL_hi = srcL_hi;
}
PPCInstr* PPCInstr_EvCheck ( PPCAMode* amCounter,
PPCAMode* amFailAddr ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_EvCheck;
i->Pin.EvCheck.amCounter = amCounter;
i->Pin.EvCheck.amFailAddr = amFailAddr;
return i;
}
PPCInstr* PPCInstr_ProfInc ( void ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_ProfInc;
return i;
}
default:
vpanic("PPCInstr_FpCftI(ppc_host)");
}
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpCftI;
i->Pin.FpCftI.fromI = fromI;
i->Pin.FpCftI.int32 = int32;
return i;
}
PPCInstr* PPCInstr_FpCMov ( PPCCondCode cond, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpCMov;
i->Pin.FpCMov.cond = cond;
i->Pin.FpCMov.dst = dst;
return i;
}
PPCInstr* PPCInstr_FpLdFPSCR ( HReg src, Bool dfp_rm ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpLdFPSCR;
i->Pin.FpLdFPSCR.src = src;
i->Pin.FpLdFPSCR.dfp_rm = dfp_rm ? 1 : 0;
return i;
}
PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_FpCmp;
i->Pin.FpCmp.dst = dst;
i->Pin.FpCmp.srcL = srcL;
/* Read/Write Link Register */
PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_RdWrLR;
i->Pin.RdWrLR.wrLR = wrLR;
i->Pin.RdWrLR.gpr = gpr;
/* AltiVec */
PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz,
HReg reg, PPCAMode* addr ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvLdSt;
i->Pin.AvLdSt.isLoad = isLoad;
i->Pin.AvLdSt.sz = sz;
return i;
}
PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvUnary;
i->Pin.AvUnary.op = op;
i->Pin.AvUnary.dst = dst;
}
PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBinary;
i->Pin.AvBinary.op = op;
i->Pin.AvBinary.dst = dst;
}
PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBin8x16;
i->Pin.AvBin8x16.op = op;
i->Pin.AvBin8x16.dst = dst;
}
PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBin16x8;
i->Pin.AvBin16x8.op = op;
i->Pin.AvBin16x8.dst = dst;
}
PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBin32x4;
i->Pin.AvBin32x4.op = op;
i->Pin.AvBin32x4.dst = dst;
}
PPCInstr* PPCInstr_AvBin64x2 ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBin64x2;
i->Pin.AvBin64x2.op = op;
i->Pin.AvBin64x2.dst = dst;
PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvFpOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBin32Fx4;
i->Pin.AvBin32Fx4.op = op;
i->Pin.AvBin32Fx4.dst = dst;
return i;
}
PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvFpOp op, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvUn32Fx4;
i->Pin.AvUn32Fx4.op = op;
i->Pin.AvUn32Fx4.dst = dst;
return i;
}
PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvPerm;
i->Pin.AvPerm.dst = dst;
i->Pin.AvPerm.srcL = srcL;
}
PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvSel;
i->Pin.AvSel.ctl = ctl;
i->Pin.AvSel.dst = dst;
return i;
}
PPCInstr* PPCInstr_AvSh ( Bool shLeft, HReg dst, PPCAMode* addr ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvSh;
i->Pin.AvSh.shLeft = shLeft;
i->Pin.AvSh.dst = dst;
}
PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvShlDbl;
i->Pin.AvShlDbl.shift = shift;
i->Pin.AvShlDbl.dst = dst;
return i;
}
PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvSplat;
i->Pin.AvSplat.sz = sz;
i->Pin.AvSplat.dst = dst;
return i;
}
PPCInstr* PPCInstr_AvCMov ( PPCCondCode cond, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvCMov;
i->Pin.AvCMov.cond = cond;
i->Pin.AvCMov.dst = dst;
return i;
}
PPCInstr* PPCInstr_AvLdVSCR ( HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvLdVSCR;
i->Pin.AvLdVSCR.src = src;
return i;
}
PPCInstr* PPCInstr_AvCipherV128Unary ( PPCAvOp op, HReg dst, HReg src ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvCipherV128Unary;
i->Pin.AvCipherV128Unary.op = op;
i->Pin.AvCipherV128Unary.dst = dst;
}
PPCInstr* PPCInstr_AvCipherV128Binary ( PPCAvOp op, HReg dst,
HReg srcL, HReg srcR ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvCipherV128Binary;
i->Pin.AvCipherV128Binary.op = op;
i->Pin.AvCipherV128Binary.dst = dst;
}
PPCInstr* PPCInstr_AvHashV128Binary ( PPCAvOp op, HReg dst,
HReg src, PPCRI* s_field ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvHashV128Binary;
i->Pin.AvHashV128Binary.op = op;
i->Pin.AvHashV128Binary.dst = dst;
}
PPCInstr* PPCInstr_AvBCDV128Trinary ( PPCAvOp op, HReg dst,
HReg src1, HReg src2, PPCRI* ps ) {
- PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr));
+ PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
i->tag = Pin_AvBCDV128Trinary;
i->Pin.AvBCDV128Trinary.op = op;
i->Pin.AvBCDV128Trinary.dst = dst;
IEndianess = Iend_LE;
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Are we being ppc32 or ppc64? */
* for supporting I128 in 32-bit mode
*/
env->n_vregmap = bb->tyenv->types_used;
- env->vregmapLo = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapMedLo = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmapLo = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapMedLo = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
if (mode64) {
env->vregmapMedHi = NULL;
env->vregmapHi = NULL;
} else {
- env->vregmapMedHi = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHi = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmapMedHi = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHi = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
}
/* and finally ... */
+ 16 /* FPRs */
;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
i = 0;
s390_amode *
s390_amode_b12(Int d, HReg b)
{
- s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+ s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
vassert(fits_unsigned_12bit(d));
s390_amode *
s390_amode_b20(Int d, HReg b)
{
- s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+ s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
vassert(fits_signed_20bit(d));
s390_amode *
s390_amode_bx12(Int d, HReg b, HReg x)
{
- s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+ s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
vassert(fits_unsigned_12bit(d));
vassert(hregNumber(b) != 0);
s390_amode *
s390_amode_bx20(Int d, HReg b, HReg x)
{
- s390_amode *am = LibVEX_Alloc(sizeof(s390_amode));
+ s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
vassert(fits_signed_20bit(d));
vassert(hregNumber(b) != 0);
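/* Illustrative sketch, not part of the patch: the range checks asserted
   by the amode constructors above, under the obvious reading that
   12-bit displacements are unsigned and 20-bit ones are signed. */
static Bool fits_unsigned_12bit ( Int val )
{
   return (val & 0xFFF) == val;
}
static Bool fits_signed_20bit ( Int val )
{
   return ((val << 12) >> 12) == val;   /* sign-extend and compare */
}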
s390_insn *
s390_insn_load(UChar size, HReg dst, s390_amode *src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_LOAD;
insn->size = size;
s390_insn *
s390_insn_store(UChar size, s390_amode *dst, HReg src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_STORE;
insn->size = size;
s390_insn *
s390_insn_move(UChar size, HReg dst, HReg src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_MOVE;
insn->size = size;
s390_insn *
s390_insn_memcpy(UChar size, s390_amode *dst, s390_amode *src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
/* This insn will be mapped to MVC which requires base register
plus 12-bit displacement */
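/* Illustrative sketch, not part of the patch: the MVC constraint noted
   above presumably surfaces as amode-tag checks on both operands. */
vassert(dst->tag == S390_AMODE_B12);
vassert(src->tag == S390_AMODE_B12);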
s390_insn *
s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst, s390_opnd_RMI src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_COND_MOVE;
insn->size = size;
s390_insn *
s390_insn_load_immediate(UChar size, HReg dst, ULong value)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_LOAD_IMMEDIATE;
insn->size = size;
s390_insn *
s390_insn_alu(UChar size, s390_alu_t tag, HReg dst, s390_opnd_RMI op2)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_ALU;
insn->size = size;
s390_insn_mul(UChar size, HReg dst_hi, HReg dst_lo, s390_opnd_RMI op2,
Bool signed_multiply)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(! hregIsVirtual(dst_hi));
vassert(! hregIsVirtual(dst_lo));
s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo, s390_opnd_RMI op2,
Bool signed_divide)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
vassert(! hregIsVirtual(op1_hi));
s390_insn *
s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 8);
vassert(! hregIsVirtual(op1));
s390_insn *
s390_insn_clz(UChar size, HReg num_bits, HReg clobber, s390_opnd_RMI src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 8);
vassert(! hregIsVirtual(num_bits));
s390_insn *
s390_insn_unop(UChar size, s390_unop_t tag, HReg dst, s390_opnd_RMI opnd)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_UNOP;
insn->size = size;
s390_insn *
s390_insn_test(UChar size, s390_opnd_RMI src)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn *
s390_insn_cc2bool(HReg dst, s390_cc_t cond)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_CC2BOOL;
insn->size = 0; /* does not matter */
s390_insn *
s390_insn_cas(UChar size, HReg op1, s390_amode *op2, HReg op3, HReg old_mem)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
vassert(hregNumber(op2->x) == 0);
HReg op3_high, HReg op3_low, HReg old_mem_high, HReg old_mem_low,
HReg scratch)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
- s390_cdas *cdas = LibVEX_Alloc(sizeof(s390_cdas));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+ s390_cdas *cdas = LibVEX_Alloc_inline(sizeof(s390_cdas));
vassert(size == 4 || size == 8);
vassert(hregNumber(op2->x) == 0);
s390_insn_compare(UChar size, HReg src1, s390_opnd_RMI src2,
Bool signed_comparison)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn_helper_call(s390_cc_t cond, Addr64 target, UInt num_args,
const HChar *name, RetLoc rloc)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
- s390_helper_call *helper_call = LibVEX_Alloc(sizeof(s390_helper_call));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+ s390_helper_call *helper_call = LibVEX_Alloc_inline(sizeof(s390_helper_call));
insn->tag = S390_INSN_HELPER_CALL;
insn->size = 0; /* does not matter */
s390_insn_bfp_triop(UChar size, s390_bfp_triop_t tag, HReg dst, HReg op2,
HReg op3)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn *
s390_insn_bfp_binop(UChar size, s390_bfp_binop_t tag, HReg dst, HReg op2)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn *
s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst, HReg op)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn *
s390_insn_bfp_compare(UChar size, HReg dst, HReg op1, HReg op2)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn_bfp_convert(UChar size, s390_bfp_conv_t tag, HReg dst, HReg op,
s390_bfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn_bfp128_binop(UChar size, s390_bfp_binop_t tag, HReg dst_hi,
HReg dst_lo, HReg op2_hi, HReg op2_lo)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 16);
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
s390_insn_bfp128_unop(UChar size, s390_bfp_unop_t tag, HReg dst_hi,
HReg dst_lo, HReg op_hi, HReg op_lo)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 16);
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
s390_insn_bfp128_compare(UChar size, HReg dst, HReg op1_hi, HReg op1_lo,
HReg op2_hi, HReg op2_lo)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 16);
vassert(is_valid_fp128_regpair(op1_hi, op1_lo));
HReg dst_lo, HReg op_hi, HReg op_lo,
s390_bfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
if (size == 16) {
/* From smaller size to 16 bytes */
s390_insn_dfp_binop(UChar size, s390_dfp_binop_t tag, HReg dst, HReg op2,
HReg op3, s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
- s390_dfp_binop *dfp_binop = LibVEX_Alloc(sizeof(s390_dfp_binop));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+ s390_dfp_binop *dfp_binop = LibVEX_Alloc_inline(sizeof(s390_dfp_binop));
vassert(size == 8);
s390_insn *
s390_insn_dfp_unop(UChar size, s390_dfp_unop_t tag, HReg dst, HReg op)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 8);
s390_insn_dfp_intop(UChar size, s390_dfp_intop_t tag, HReg dst, HReg op2,
HReg op3)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 8);
s390_insn_dfp_compare(UChar size, s390_dfp_cmp_t tag, HReg dst,
HReg op1, HReg op2)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 8);
s390_insn_dfp_convert(UChar size, s390_dfp_conv_t tag, HReg dst, HReg op,
s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
s390_insn_dfp_reround(UChar size, HReg dst, HReg op2, HReg op3,
s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 8);
s390_insn_fp_convert(UChar size, s390_fp_conv_t tag, HReg dst, HReg op,
HReg r1, s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
- s390_fp_convert *fp_convert = LibVEX_Alloc(sizeof(s390_fp_convert));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+ s390_fp_convert *fp_convert = LibVEX_Alloc_inline(sizeof(s390_fp_convert));
vassert(size == 4 || size == 8);
HReg dst_lo, HReg op_hi, HReg op_lo, HReg r1,
s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
- s390_fp_convert *fp_convert = LibVEX_Alloc(sizeof(s390_fp_convert));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+ s390_fp_convert *fp_convert = LibVEX_Alloc_inline(sizeof(s390_fp_convert));
vassert(size == 16);
HReg dst_lo, HReg op2_hi, HReg op2_lo, HReg op3_hi,
HReg op3_lo, s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
- s390_dfp_binop *dfp_binop = LibVEX_Alloc(sizeof(s390_dfp_binop));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+ s390_dfp_binop *dfp_binop = LibVEX_Alloc_inline(sizeof(s390_dfp_binop));
vassert(size == 16);
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
s390_insn_dfp128_unop(UChar size, s390_dfp_unop_t tag, HReg dst,
HReg op_hi, HReg op_lo)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
/* destination is an 8 byte integer value */
vassert(size == 8);
s390_insn_dfp128_intop(UChar size, s390_dfp_intop_t tag, HReg dst_hi,
HReg dst_lo, HReg op2, HReg op3_hi, HReg op3_lo)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 16);
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
s390_insn_dfp128_compare(UChar size, s390_dfp_cmp_t tag, HReg dst, HReg op1_hi,
HReg op1_lo, HReg op2_hi, HReg op2_lo)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 16);
vassert(is_valid_fp128_regpair(op1_hi, op1_lo));
HReg dst_lo, HReg op_hi, HReg op_lo,
s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
if (size == 16) {
/* From smaller size to 16 bytes */
HReg op3_hi, HReg op3_lo,
s390_dfp_round_t rounding_mode)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 16);
vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
s390_insn *
s390_insn_mfence(void)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_MFENCE;
insn->size = 0; /* not needed */
s390_insn *
s390_insn_mimm(UChar size, s390_amode *dst, ULong value)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
/* This insn will be mapped to insns that require base register
plus 12-bit displacement */
s390_insn *
s390_insn_madd(UChar size, s390_amode *dst, UChar delta, ULong value)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(size == 4 || size == 8);
{
vassert(size == 4);
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_SET_FPC_BFPRM;
insn->size = size;
{
vassert(size == 4);
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_SET_FPC_DFPRM;
insn->size = size;
s390_insn_xdirect(s390_cc_t cond, Addr64 dst, s390_amode *guest_IA,
Bool to_fast_entry)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(guest_IA->tag == S390_AMODE_B12);
s390_insn *
s390_insn_xindir(s390_cc_t cond, HReg dst, s390_amode *guest_IA)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(guest_IA->tag == S390_AMODE_B12);
s390_insn_xassisted(s390_cc_t cond, HReg dst, s390_amode *guest_IA,
IRJumpKind kind)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(guest_IA->tag == S390_AMODE_B12);
s390_insn *
s390_insn_evcheck(s390_amode *counter, s390_amode *fail_addr)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
vassert(counter->tag == S390_AMODE_B12);
vassert(fail_addr->tag == S390_AMODE_B12);
s390_insn *
s390_insn_profinc(void)
{
- s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+ s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
insn->tag = S390_INSN_PROFINC;
insn->size = 0; /* does not matter */
vassert(archinfo_host->endness == VexEndnessBE);
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
vassert(bb->tyenv->types_used >= 0);
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
env->previous_bfp_rounding_mode = NULL;
env->previous_dfp_rounding_mode = NULL;
void getAllocableRegs_X86 ( Int* nregs, HReg** arr )
{
*nregs = 20;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
(*arr)[0] = hregX86_EAX();
(*arr)[1] = hregX86_EBX();
(*arr)[2] = hregX86_ECX();
/* --------- X86AMode: memory address expressions. --------- */
X86AMode* X86AMode_IR ( UInt imm32, HReg reg ) {
- X86AMode* am = LibVEX_Alloc(sizeof(X86AMode));
+ X86AMode* am = LibVEX_Alloc_inline(sizeof(X86AMode));
am->tag = Xam_IR;
am->Xam.IR.imm = imm32;
am->Xam.IR.reg = reg;
return am;
}
X86AMode* X86AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
- X86AMode* am = LibVEX_Alloc(sizeof(X86AMode));
+ X86AMode* am = LibVEX_Alloc_inline(sizeof(X86AMode));
am->tag = Xam_IRRS;
am->Xam.IRRS.imm = imm32;
am->Xam.IRRS.base = base;
/* --------- Operand, which can be reg, immediate or memory. --------- */
X86RMI* X86RMI_Imm ( UInt imm32 ) {
- X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
+ X86RMI* op = LibVEX_Alloc_inline(sizeof(X86RMI));
op->tag = Xrmi_Imm;
op->Xrmi.Imm.imm32 = imm32;
return op;
}
X86RMI* X86RMI_Reg ( HReg reg ) {
- X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
+ X86RMI* op = LibVEX_Alloc_inline(sizeof(X86RMI));
op->tag = Xrmi_Reg;
op->Xrmi.Reg.reg = reg;
return op;
}
X86RMI* X86RMI_Mem ( X86AMode* am ) {
- X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
+ X86RMI* op = LibVEX_Alloc_inline(sizeof(X86RMI));
op->tag = Xrmi_Mem;
op->Xrmi.Mem.am = am;
return op;
/* --------- Operand, which can be reg or immediate only. --------- */
X86RI* X86RI_Imm ( UInt imm32 ) {
- X86RI* op = LibVEX_Alloc(sizeof(X86RI));
+ X86RI* op = LibVEX_Alloc_inline(sizeof(X86RI));
op->tag = Xri_Imm;
op->Xri.Imm.imm32 = imm32;
return op;
}
X86RI* X86RI_Reg ( HReg reg ) {
- X86RI* op = LibVEX_Alloc(sizeof(X86RI));
+ X86RI* op = LibVEX_Alloc_inline(sizeof(X86RI));
op->tag = Xri_Reg;
op->Xri.Reg.reg = reg;
return op;
/* --------- Operand, which can be reg or memory only. --------- */
X86RM* X86RM_Reg ( HReg reg ) {
- X86RM* op = LibVEX_Alloc(sizeof(X86RM));
+ X86RM* op = LibVEX_Alloc_inline(sizeof(X86RM));
op->tag = Xrm_Reg;
op->Xrm.Reg.reg = reg;
return op;
}
X86RM* X86RM_Mem ( X86AMode* am ) {
- X86RM* op = LibVEX_Alloc(sizeof(X86RM));
+ X86RM* op = LibVEX_Alloc_inline(sizeof(X86RM));
op->tag = Xrm_Mem;
op->Xrm.Mem.am = am;
return op;
}
X86Instr* X86Instr_Alu32R ( X86AluOp op, X86RMI* src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Alu32R;
i->Xin.Alu32R.op = op;
i->Xin.Alu32R.src = src;
return i;
}
X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Alu32M;
i->Xin.Alu32M.op = op;
i->Xin.Alu32M.src = src;
return i;
}
X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Sh32;
i->Xin.Sh32.op = op;
i->Xin.Sh32.src = src;
return i;
}
X86Instr* X86Instr_Test32 ( UInt imm32, X86RM* dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Test32;
i->Xin.Test32.imm32 = imm32;
i->Xin.Test32.dst = dst;
return i;
}
X86Instr* X86Instr_Unary32 ( X86UnaryOp op, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Unary32;
i->Xin.Unary32.op = op;
i->Xin.Unary32.dst = dst;
return i;
}
X86Instr* X86Instr_Lea32 ( X86AMode* am, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Lea32;
i->Xin.Lea32.am = am;
i->Xin.Lea32.dst = dst;
return i;
}
X86Instr* X86Instr_MulL ( Bool syned, X86RM* src ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_MulL;
i->Xin.MulL.syned = syned;
i->Xin.MulL.src = src;
return i;
}
X86Instr* X86Instr_Div ( Bool syned, X86RM* src ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Div;
i->Xin.Div.syned = syned;
i->Xin.Div.src = src;
return i;
}
X86Instr* X86Instr_Sh3232 ( X86ShiftOp op, UInt amt, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Sh3232;
i->Xin.Sh3232.op = op;
i->Xin.Sh3232.amt = amt;
return i;
}
X86Instr* X86Instr_Push( X86RMI* src ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Push;
i->Xin.Push.src = src;
return i;
}
X86Instr* X86Instr_Call ( X86CondCode cond, Addr32 target, Int regparms,
RetLoc rloc ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Call;
i->Xin.Call.cond = cond;
i->Xin.Call.target = target;
}
X86Instr* X86Instr_XDirect ( Addr32 dstGA, X86AMode* amEIP,
X86CondCode cond, Bool toFastEP ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_XDirect;
i->Xin.XDirect.dstGA = dstGA;
i->Xin.XDirect.amEIP = amEIP;
}
X86Instr* X86Instr_XIndir ( HReg dstGA, X86AMode* amEIP,
X86CondCode cond ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_XIndir;
i->Xin.XIndir.dstGA = dstGA;
i->Xin.XIndir.amEIP = amEIP;
}
X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
X86CondCode cond, IRJumpKind jk ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_XAssisted;
i->Xin.XAssisted.dstGA = dstGA;
i->Xin.XAssisted.amEIP = amEIP;
return i;
}
X86Instr* X86Instr_CMov32 ( X86CondCode cond, X86RM* src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_CMov32;
i->Xin.CMov32.cond = cond;
i->Xin.CMov32.src = src;
}
X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
X86AMode* src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_LoadEX;
i->Xin.LoadEX.szSmall = szSmall;
i->Xin.LoadEX.syned = syned;
return i;
}
X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Store;
i->Xin.Store.sz = sz;
i->Xin.Store.src = src;
return i;
}
X86Instr* X86Instr_Set32 ( X86CondCode cond, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Set32;
i->Xin.Set32.cond = cond;
i->Xin.Set32.dst = dst;
return i;
}
X86Instr* X86Instr_Bsfr32 ( Bool isFwds, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Bsfr32;
i->Xin.Bsfr32.isFwds = isFwds;
i->Xin.Bsfr32.src = src;
return i;
}
X86Instr* X86Instr_MFence ( UInt hwcaps ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_MFence;
i->Xin.MFence.hwcaps = hwcaps;
vassert(0 == (hwcaps & ~(VEX_HWCAPS_X86_MMXEXT
return i;
}
X86Instr* X86Instr_ACAS ( X86AMode* addr, UChar sz ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_ACAS;
i->Xin.ACAS.addr = addr;
i->Xin.ACAS.sz = sz;
return i;
}
X86Instr* X86Instr_DACAS ( X86AMode* addr ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_DACAS;
i->Xin.DACAS.addr = addr;
return i;
}
X86Instr* X86Instr_FpUnary ( X86FpOp op, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpUnary;
i->Xin.FpUnary.op = op;
i->Xin.FpUnary.src = src;
return i;
}
X86Instr* X86Instr_FpBinary ( X86FpOp op, HReg srcL, HReg srcR, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpBinary;
i->Xin.FpBinary.op = op;
i->Xin.FpBinary.srcL = srcL;
return i;
}
X86Instr* X86Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, X86AMode* addr ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpLdSt;
i->Xin.FpLdSt.isLoad = isLoad;
i->Xin.FpLdSt.sz = sz;
}
X86Instr* X86Instr_FpLdStI ( Bool isLoad, UChar sz,
HReg reg, X86AMode* addr ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpLdStI;
i->Xin.FpLdStI.isLoad = isLoad;
i->Xin.FpLdStI.sz = sz;
return i;
}
X86Instr* X86Instr_Fp64to32 ( HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Fp64to32;
i->Xin.Fp64to32.src = src;
i->Xin.Fp64to32.dst = dst;
return i;
}
X86Instr* X86Instr_FpCMov ( X86CondCode cond, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpCMov;
i->Xin.FpCMov.cond = cond;
i->Xin.FpCMov.src = src;
return i;
}
X86Instr* X86Instr_FpLdCW ( X86AMode* addr ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpLdCW;
i->Xin.FpLdCW.addr = addr;
return i;
}
X86Instr* X86Instr_FpStSW_AX ( void ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpStSW_AX;
return i;
}
X86Instr* X86Instr_FpCmp ( HReg srcL, HReg srcR, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_FpCmp;
i->Xin.FpCmp.srcL = srcL;
i->Xin.FpCmp.srcR = srcR;
return i;
}
X86Instr* X86Instr_SseConst ( UShort con, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_SseConst;
i->Xin.SseConst.con = con;
i->Xin.SseConst.dst = dst;
return i;
}
X86Instr* X86Instr_SseLdSt ( Bool isLoad, HReg reg, X86AMode* addr ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_SseLdSt;
i->Xin.SseLdSt.isLoad = isLoad;
i->Xin.SseLdSt.reg = reg;
}
X86Instr* X86Instr_SseLdzLO ( Int sz, HReg reg, X86AMode* addr )
{
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_SseLdzLO;
i->Xin.SseLdzLO.sz = toUChar(sz);
i->Xin.SseLdzLO.reg = reg;
return i;
}
X86Instr* X86Instr_Sse32Fx4 ( X86SseOp op, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Sse32Fx4;
i->Xin.Sse32Fx4.op = op;
i->Xin.Sse32Fx4.src = src;
return i;
}
X86Instr* X86Instr_Sse32FLo ( X86SseOp op, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Sse32FLo;
i->Xin.Sse32FLo.op = op;
i->Xin.Sse32FLo.src = src;
return i;
}
X86Instr* X86Instr_Sse64Fx2 ( X86SseOp op, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Sse64Fx2;
i->Xin.Sse64Fx2.op = op;
i->Xin.Sse64Fx2.src = src;
return i;
}
X86Instr* X86Instr_Sse64FLo ( X86SseOp op, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_Sse64FLo;
i->Xin.Sse64FLo.op = op;
i->Xin.Sse64FLo.src = src;
return i;
}
X86Instr* X86Instr_SseReRg ( X86SseOp op, HReg re, HReg rg ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_SseReRg;
i->Xin.SseReRg.op = op;
i->Xin.SseReRg.src = re;
return i;
}
X86Instr* X86Instr_SseCMov ( X86CondCode cond, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_SseCMov;
i->Xin.SseCMov.cond = cond;
i->Xin.SseCMov.src = src;
return i;
}
X86Instr* X86Instr_SseShuf ( Int order, HReg src, HReg dst ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_SseShuf;
i->Xin.SseShuf.order = order;
i->Xin.SseShuf.src = src;
}
X86Instr* X86Instr_EvCheck ( X86AMode* amCounter,
X86AMode* amFailAddr ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_EvCheck;
i->Xin.EvCheck.amCounter = amCounter;
i->Xin.EvCheck.amFailAddr = amFailAddr;
return i;
}
X86Instr* X86Instr_ProfInc ( void ) {
- X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+ X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
i->tag = Xin_ProfInc;
return i;
}
vassert(archinfo_host->endness == VexEndnessLE);
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->chainingAllowed = chainingAllowed;
IRConst* IRConst_U1 ( Bool bit )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_U1;
c->Ico.U1 = bit;
/* call me paranoid; I don't care :-) */
}
IRConst* IRConst_U8 ( UChar u8 )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_U8;
c->Ico.U8 = u8;
return c;
}
IRConst* IRConst_U16 ( UShort u16 )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_U16;
c->Ico.U16 = u16;
return c;
}
IRConst* IRConst_U32 ( UInt u32 )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_U32;
c->Ico.U32 = u32;
return c;
}
IRConst* IRConst_U64 ( ULong u64 )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_U64;
c->Ico.U64 = u64;
return c;
}
IRConst* IRConst_F32 ( Float f32 )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_F32;
c->Ico.F32 = f32;
return c;
}
IRConst* IRConst_F32i ( UInt f32i )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_F32i;
c->Ico.F32i = f32i;
return c;
}
IRConst* IRConst_F64 ( Double f64 )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_F64;
c->Ico.F64 = f64;
return c;
}
IRConst* IRConst_F64i ( ULong f64i )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_F64i;
c->Ico.F64i = f64i;
return c;
}
IRConst* IRConst_V128 ( UShort con )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_V128;
c->Ico.V128 = con;
return c;
}
IRConst* IRConst_V256 ( UInt con )
{
- IRConst* c = LibVEX_Alloc(sizeof(IRConst));
+ IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
c->tag = Ico_V256;
c->Ico.V256 = con;
return c;
IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr )
{
- IRCallee* ce = LibVEX_Alloc(sizeof(IRCallee));
+ IRCallee* ce = LibVEX_Alloc_inline(sizeof(IRCallee));
ce->regparms = regparms;
ce->name = name;
ce->addr = addr;
IRRegArray* mkIRRegArray ( Int base, IRType elemTy, Int nElems )
{
- IRRegArray* arr = LibVEX_Alloc(sizeof(IRRegArray));
+ IRRegArray* arr = LibVEX_Alloc_inline(sizeof(IRRegArray));
arr->base = base;
arr->elemTy = elemTy;
arr->nElems = nElems;
/* Constructors -- IRExpr */
IRExpr* IRExpr_Binder ( Int binder ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_Binder;
e->Iex.Binder.binder = binder;
return e;
}
IRExpr* IRExpr_Get ( Int off, IRType ty ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_Get;
e->Iex.Get.offset = off;
e->Iex.Get.ty = ty;
return e;
}
IRExpr* IRExpr_GetI ( IRRegArray* descr, IRExpr* ix, Int bias ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_GetI;
e->Iex.GetI.descr = descr;
e->Iex.GetI.ix = ix;
return e;
}
IRExpr* IRExpr_RdTmp ( IRTemp tmp ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_RdTmp;
e->Iex.RdTmp.tmp = tmp;
return e;
}
IRExpr* IRExpr_Qop ( IROp op, IRExpr* arg1, IRExpr* arg2,
IRExpr* arg3, IRExpr* arg4 ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
- IRQop* qop = LibVEX_Alloc(sizeof(IRQop));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
+ IRQop* qop = LibVEX_Alloc_inline(sizeof(IRQop));
qop->op = op;
qop->arg1 = arg1;
qop->arg2 = arg2;
}
IRExpr* IRExpr_Triop ( IROp op, IRExpr* arg1,
IRExpr* arg2, IRExpr* arg3 ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
- IRTriop* triop = LibVEX_Alloc(sizeof(IRTriop));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
+ IRTriop* triop = LibVEX_Alloc_inline(sizeof(IRTriop));
triop->op = op;
triop->arg1 = arg1;
triop->arg2 = arg2;
return e;
}
IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_Binop;
e->Iex.Binop.op = op;
e->Iex.Binop.arg1 = arg1;
return e;
}
IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_Unop;
e->Iex.Unop.op = op;
e->Iex.Unop.arg = arg;
return e;
}
IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_Load;
e->Iex.Load.end = end;
e->Iex.Load.ty = ty;
return e;
}
IRExpr* IRExpr_Const ( IRConst* con ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_Const;
e->Iex.Const.con = con;
return e;
}
IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_CCall;
e->Iex.CCall.cee = cee;
e->Iex.CCall.retty = retty;
return e;
}
IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_ITE;
e->Iex.ITE.cond = cond;
e->Iex.ITE.iftrue = iftrue;
return e;
}
IRExpr* IRExpr_VECRET ( void ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_VECRET;
return e;
}
IRExpr* IRExpr_BBPTR ( void ) {
- IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+ IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
e->tag = Iex_BBPTR;
return e;
}
suitable for use as arg lists in clean/dirty helper calls. */
IRExpr** mkIRExprVec_0 ( void ) {
- IRExpr** vec = LibVEX_Alloc(1 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(1 * sizeof(IRExpr*));
vec[0] = NULL;
return vec;
}
IRExpr** mkIRExprVec_1 ( IRExpr* arg1 ) {
- IRExpr** vec = LibVEX_Alloc(2 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(2 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = NULL;
return vec;
}
IRExpr** mkIRExprVec_2 ( IRExpr* arg1, IRExpr* arg2 ) {
- IRExpr** vec = LibVEX_Alloc(3 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(3 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = NULL;
return vec;
}
IRExpr** mkIRExprVec_3 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3 ) {
- IRExpr** vec = LibVEX_Alloc(4 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(4 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = arg3;
}
IRExpr** mkIRExprVec_4 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
IRExpr* arg4 ) {
- IRExpr** vec = LibVEX_Alloc(5 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(5 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = arg3;
}
IRExpr** mkIRExprVec_5 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
IRExpr* arg4, IRExpr* arg5 ) {
- IRExpr** vec = LibVEX_Alloc(6 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(6 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = arg3;
}
IRExpr** mkIRExprVec_6 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
IRExpr* arg4, IRExpr* arg5, IRExpr* arg6 ) {
- IRExpr** vec = LibVEX_Alloc(7 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(7 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = arg3;
IRExpr** mkIRExprVec_7 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
IRExpr* arg4, IRExpr* arg5, IRExpr* arg6,
IRExpr* arg7 ) {
- IRExpr** vec = LibVEX_Alloc(8 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(8 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = arg3;
IRExpr** mkIRExprVec_8 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
IRExpr* arg4, IRExpr* arg5, IRExpr* arg6,
IRExpr* arg7, IRExpr* arg8 ) {
- IRExpr** vec = LibVEX_Alloc(9 * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline(9 * sizeof(IRExpr*));
vec[0] = arg1;
vec[1] = arg2;
vec[2] = arg3;
/* Constructors -- IRDirty */
IRDirty* emptyIRDirty ( void ) {
- IRDirty* d = LibVEX_Alloc(sizeof(IRDirty));
+ IRDirty* d = LibVEX_Alloc_inline(sizeof(IRDirty));
d->cee = NULL;
d->guard = NULL;
d->args = NULL;
IREndness end, IRExpr* addr,
IRExpr* expdHi, IRExpr* expdLo,
IRExpr* dataHi, IRExpr* dataLo ) {
- IRCAS* cas = LibVEX_Alloc(sizeof(IRCAS));
+ IRCAS* cas = LibVEX_Alloc_inline(sizeof(IRCAS));
cas->oldHi = oldHi;
cas->oldLo = oldLo;
cas->end = end;
IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
Int bias, IRExpr* data )
{
- IRPutI* puti = LibVEX_Alloc(sizeof(IRPutI));
+ IRPutI* puti = LibVEX_Alloc_inline(sizeof(IRPutI));
puti->descr = descr;
puti->ix = ix;
puti->bias = bias;
IRStoreG* mkIRStoreG ( IREndness end,
IRExpr* addr, IRExpr* data, IRExpr* guard )
{
- IRStoreG* sg = LibVEX_Alloc(sizeof(IRStoreG));
+ IRStoreG* sg = LibVEX_Alloc_inline(sizeof(IRStoreG));
sg->end = end;
sg->addr = addr;
sg->data = data;
IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
IRTemp dst, IRExpr* addr, IRExpr* alt, IRExpr* guard )
{
- IRLoadG* lg = LibVEX_Alloc(sizeof(IRLoadG));
+ IRLoadG* lg = LibVEX_Alloc_inline(sizeof(IRLoadG));
lg->end = end;
lg->cvt = cvt;
lg->dst = dst;
return &static_closure;
}
IRStmt* IRStmt_IMark ( Addr addr, UInt len, UChar delta ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_IMark;
s->Ist.IMark.addr = addr;
s->Ist.IMark.len = len;
return s;
}
IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_AbiHint;
s->Ist.AbiHint.base = base;
s->Ist.AbiHint.len = len;
return s;
}
IRStmt* IRStmt_Put ( Int off, IRExpr* data ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_Put;
s->Ist.Put.offset = off;
s->Ist.Put.data = data;
return s;
}
IRStmt* IRStmt_PutI ( IRPutI* details ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_PutI;
s->Ist.PutI.details = details;
return s;
}
IRStmt* IRStmt_WrTmp ( IRTemp tmp, IRExpr* data ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_WrTmp;
s->Ist.WrTmp.tmp = tmp;
s->Ist.WrTmp.data = data;
return s;
}
IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_Store;
s->Ist.Store.end = end;
s->Ist.Store.addr = addr;
}
IRStmt* IRStmt_StoreG ( IREndness end, IRExpr* addr, IRExpr* data,
IRExpr* guard ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_StoreG;
s->Ist.StoreG.details = mkIRStoreG(end, addr, data, guard);
vassert(end == Iend_LE || end == Iend_BE);
}
IRStmt* IRStmt_LoadG ( IREndness end, IRLoadGOp cvt, IRTemp dst,
IRExpr* addr, IRExpr* alt, IRExpr* guard ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_LoadG;
s->Ist.LoadG.details = mkIRLoadG(end, cvt, dst, addr, alt, guard);
return s;
}
IRStmt* IRStmt_CAS ( IRCAS* cas ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_CAS;
s->Ist.CAS.details = cas;
return s;
}
IRStmt* IRStmt_LLSC ( IREndness end,
IRTemp result, IRExpr* addr, IRExpr* storedata ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_LLSC;
s->Ist.LLSC.end = end;
s->Ist.LLSC.result = result;
}
IRStmt* IRStmt_Dirty ( IRDirty* d )
{
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_Dirty;
s->Ist.Dirty.details = d;
return s;
}
IRStmt* IRStmt_MBE ( IRMBusEvent event )
{
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_MBE;
s->Ist.MBE.event = event;
return s;
}
IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
Int offsIP ) {
- IRStmt* s = LibVEX_Alloc(sizeof(IRStmt));
+ IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
s->tag = Ist_Exit;
s->Ist.Exit.guard = guard;
s->Ist.Exit.jk = jk;
IRTypeEnv* emptyIRTypeEnv ( void )
{
- IRTypeEnv* env = LibVEX_Alloc(sizeof(IRTypeEnv));
- env->types = LibVEX_Alloc(8 * sizeof(IRType));
+ IRTypeEnv* env = LibVEX_Alloc_inline(sizeof(IRTypeEnv));
+ env->types = LibVEX_Alloc_inline(8 * sizeof(IRType));
env->types_size = 8;
env->types_used = 0;
return env;
IRSB* emptyIRSB ( void )
{
- IRSB* bb = LibVEX_Alloc(sizeof(IRSB));
+ IRSB* bb = LibVEX_Alloc_inline(sizeof(IRSB));
bb->tyenv = emptyIRTypeEnv();
bb->stmts_used = 0;
bb->stmts_size = 8;
- bb->stmts = LibVEX_Alloc(bb->stmts_size * sizeof(IRStmt*));
+ bb->stmts = LibVEX_Alloc_inline(bb->stmts_size * sizeof(IRStmt*));
bb->next = NULL;
bb->jumpkind = Ijk_Boring;
bb->offsIP = 0;
IRExpr** newvec;
for (i = 0; vec[i]; i++)
;
- newvec = LibVEX_Alloc((i+1)*sizeof(IRExpr*));
+ newvec = LibVEX_Alloc_inline((i+1)*sizeof(IRExpr*));
for (i = 0; vec[i]; i++)
newvec[i] = vec[i];
newvec[i] = NULL;
IRExpr** newvec;
for (i = 0; vec[i]; i++)
;
- newvec = LibVEX_Alloc((i+1)*sizeof(IRExpr*));
+ newvec = LibVEX_Alloc_inline((i+1)*sizeof(IRExpr*));
for (i = 0; vec[i]; i++)
newvec[i] = deepCopyIRExpr(vec[i]);
newvec[i] = NULL;
IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* src )
{
Int i;
- IRTypeEnv* dst = LibVEX_Alloc(sizeof(IRTypeEnv));
+ IRTypeEnv* dst = LibVEX_Alloc_inline(sizeof(IRTypeEnv));
dst->types_size = src->types_size;
dst->types_used = src->types_used;
- dst->types = LibVEX_Alloc(dst->types_size * sizeof(IRType));
+ dst->types = LibVEX_Alloc_inline(dst->types_size * sizeof(IRType));
for (i = 0; i < src->types_used; i++)
dst->types[i] = src->types[i];
return dst;
IRStmt** sts2;
IRSB* bb2 = deepCopyIRSBExceptStmts(bb);
bb2->stmts_used = bb2->stmts_size = bb->stmts_used;
- sts2 = LibVEX_Alloc(bb2->stmts_used * sizeof(IRStmt*));
+ sts2 = LibVEX_Alloc_inline(bb2->stmts_used * sizeof(IRStmt*));
for (i = 0; i < bb2->stmts_used; i++)
sts2[i] = deepCopyIRStmt(bb->stmts[i]);
bb2->stmts = sts2;
{
Int i;
if (bb->stmts_used == bb->stmts_size) {
- IRStmt** stmts2 = LibVEX_Alloc(2 * bb->stmts_size * sizeof(IRStmt*));
+ IRStmt** stmts2 = LibVEX_Alloc_inline(2 * bb->stmts_size * sizeof(IRStmt*));
for (i = 0; i < bb->stmts_size; i++)
stmts2[i] = bb->stmts[i];
bb->stmts = stmts2;
Int i;
Int new_size = env->types_size==0 ? 8 : 2*env->types_size;
IRType* new_types
- = LibVEX_Alloc(new_size * sizeof(IRType));
+ = LibVEX_Alloc_inline(new_size * sizeof(IRType));
for (i = 0; i < env->types_used; i++)
new_types[i] = env->types[i];
env->types = new_types;
{
Int i;
Int n_temps = bb->tyenv->types_used;
- Int* def_counts = LibVEX_Alloc(n_temps * sizeof(Int));
+ Int* def_counts = LibVEX_Alloc_inline(n_temps * sizeof(Int));
if (0)
vex_printf("sanityCheck: %s\n", caller);
static HashHW* newHHW ( void )
{
- HashHW* h = LibVEX_Alloc(sizeof(HashHW));
+ HashHW* h = LibVEX_Alloc_inline(sizeof(HashHW));
h->size = 8;
h->used = 0;
- h->inuse = LibVEX_Alloc(h->size * sizeof(Bool));
- h->key = LibVEX_Alloc(h->size * sizeof(HWord));
- h->val = LibVEX_Alloc(h->size * sizeof(HWord));
+ h->inuse = LibVEX_Alloc_inline(h->size * sizeof(Bool));
+ h->key = LibVEX_Alloc_inline(h->size * sizeof(HWord));
+ h->val = LibVEX_Alloc_inline(h->size * sizeof(HWord));
return h;
}
/* Ensure a space is available. */
if (h->used == h->size) {
/* Copy into arrays twice the size. */
- Bool* inuse2 = LibVEX_Alloc(2 * h->size * sizeof(Bool));
- HWord* key2 = LibVEX_Alloc(2 * h->size * sizeof(HWord));
- HWord* val2 = LibVEX_Alloc(2 * h->size * sizeof(HWord));
+ Bool* inuse2 = LibVEX_Alloc_inline(2 * h->size * sizeof(Bool));
+ HWord* key2 = LibVEX_Alloc_inline(2 * h->size * sizeof(HWord));
+ HWord* val2 = LibVEX_Alloc_inline(2 * h->size * sizeof(HWord));
for (i = j = 0; i < h->size; i++) {
if (!h->inuse[i]) continue;
inuse2[j] = True;
IRSB* out;
IRStmt* st2;
Int n_tmps = in->tyenv->types_used;
- IRExpr** env = LibVEX_Alloc(n_tmps * sizeof(IRExpr*));
+ IRExpr** env = LibVEX_Alloc_inline(n_tmps * sizeof(IRExpr*));
/* Keep track of IRStmt_LoadGs that we need to revisit after
processing all the other statements. */
const Int N_FIXUPS = 16;
{
Int i, i_unconditional_exit;
Int n_tmps = bb->tyenv->types_used;
- Bool* set = LibVEX_Alloc(n_tmps * sizeof(Bool));
+ Bool* set = LibVEX_Alloc_inline(n_tmps * sizeof(Bool));
IRStmt* st;
for (i = 0; i < n_tmps; i++)
/* We have to make two passes, one to count, one to copy. */
for (n = 0; ins[n]; n++)
;
- *outs = LibVEX_Alloc(n * sizeof(TmpOrConst));
+ *outs = LibVEX_Alloc_inline(n * sizeof(TmpOrConst));
*nOuts = n;
/* and now copy .. */
for (i = 0; i < n; i++) {
IRExpr_RdTmp(ae->u.Btt.arg1),
IRExpr_RdTmp(ae->u.Btt.arg2) );
case Btc:
- con = LibVEX_Alloc(sizeof(IRConst));
+ con = LibVEX_Alloc_inline(sizeof(IRConst));
*con = ae->u.Btc.con2;
return IRExpr_Binop( ae->u.Btc.op,
IRExpr_RdTmp(ae->u.Btc.arg1),
IRExpr_Const(con) );
case Bct:
- con = LibVEX_Alloc(sizeof(IRConst));
+ con = LibVEX_Alloc_inline(sizeof(IRConst));
*con = ae->u.Bct.con1;
return IRExpr_Binop( ae->u.Bct.op,
IRExpr_Const(con),
IRExpr_RdTmp(ae->u.Ittt.e1),
IRExpr_RdTmp(ae->u.Ittt.e0));
case Ittc:
- con0 = LibVEX_Alloc(sizeof(IRConst));
+ con0 = LibVEX_Alloc_inline(sizeof(IRConst));
*con0 = ae->u.Ittc.con0;
return IRExpr_ITE(IRExpr_RdTmp(ae->u.Ittc.co),
IRExpr_RdTmp(ae->u.Ittc.e1),
IRExpr_Const(con0));
case Itct:
- con1 = LibVEX_Alloc(sizeof(IRConst));
+ con1 = LibVEX_Alloc_inline(sizeof(IRConst));
*con1 = ae->u.Itct.con1;
return IRExpr_ITE(IRExpr_RdTmp(ae->u.Itct.co),
IRExpr_Const(con1),
IRExpr_RdTmp(ae->u.Itct.e0));
case Itcc:
- con0 = LibVEX_Alloc(sizeof(IRConst));
- con1 = LibVEX_Alloc(sizeof(IRConst));
+ con0 = LibVEX_Alloc_inline(sizeof(IRConst));
+ con1 = LibVEX_Alloc_inline(sizeof(IRConst));
*con0 = ae->u.Itcc.con0;
*con1 = ae->u.Itcc.con1;
return IRExpr_ITE(IRExpr_RdTmp(ae->u.Itcc.co),
case CCall: {
Int i, n = ae->u.CCall.nArgs;
vassert(n >= 0);
- IRExpr** vec = LibVEX_Alloc((n+1) * sizeof(IRExpr*));
+ IRExpr** vec = LibVEX_Alloc_inline((n+1) * sizeof(IRExpr*));
vec[n] = NULL;
for (i = 0; i < n; i++) {
vec[i] = tmpOrConst_to_IRExpr(&ae->u.CCall.args[i]);
switch (e->tag) {
case Iex_Unop:
if (e->Iex.Unop.arg->tag == Iex_RdTmp) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Ut;
ae->u.Ut.op = e->Iex.Unop.op;
ae->u.Ut.arg = e->Iex.Unop.arg->Iex.RdTmp.tmp;
case Iex_Binop:
if (e->Iex.Binop.arg1->tag == Iex_RdTmp) {
if (e->Iex.Binop.arg2->tag == Iex_RdTmp) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Btt;
ae->u.Btt.op = e->Iex.Binop.op;
ae->u.Btt.arg1 = e->Iex.Binop.arg1->Iex.RdTmp.tmp;
return ae;
}
if (e->Iex.Binop.arg2->tag == Iex_Const) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Btc;
ae->u.Btc.op = e->Iex.Binop.op;
ae->u.Btc.arg1 = e->Iex.Binop.arg1->Iex.RdTmp.tmp;
}
} else if (e->Iex.Binop.arg1->tag == Iex_Const
&& e->Iex.Binop.arg2->tag == Iex_RdTmp) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Bct;
ae->u.Bct.op = e->Iex.Binop.op;
ae->u.Bct.arg2 = e->Iex.Binop.arg2->Iex.RdTmp.tmp;
case Iex_Const:
if (e->Iex.Const.con->tag == Ico_F64i) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Cf64i;
ae->u.Cf64i.f64i = e->Iex.Const.con->Ico.F64i;
return ae;
if (e->Iex.ITE.cond->tag == Iex_RdTmp) {
if (e->Iex.ITE.iffalse->tag == Iex_RdTmp) {
if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Ittt;
ae->u.Ittt.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
ae->u.Ittt.e1 = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
return ae;
}
if (e->Iex.ITE.iftrue->tag == Iex_Const) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Itct;
ae->u.Itct.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
ae->u.Itct.con1 = *(e->Iex.ITE.iftrue->Iex.Const.con);
}
} else if (e->Iex.ITE.iffalse->tag == Iex_Const) {
if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Ittc;
ae->u.Ittc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
ae->u.Ittc.e1 = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
return ae;
}
if (e->Iex.ITE.iftrue->tag == Iex_Const) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Itcc;
ae->u.Itcc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
ae->u.Itcc.con1 = *(e->Iex.ITE.iftrue->Iex.Const.con);
case Iex_GetI:
if (e->Iex.GetI.ix->tag == Iex_RdTmp) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = GetIt;
ae->u.GetIt.descr = e->Iex.GetI.descr;
ae->u.GetIt.ix = e->Iex.GetI.ix->Iex.RdTmp.tmp;
break;
case Iex_CCall:
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = CCall;
/* Ok to share only the cee, since it is immutable. */
ae->u.CCall.cee = e->Iex.CCall.cee;
"available", which effectively disables CSEing of them, as
desired. */
if (allowLoadsToBeCSEd) {
- ae = LibVEX_Alloc(sizeof(AvailExpr));
+ ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
ae->tag = Load;
ae->u.Load.end = e->Iex.Load.end;
ae->u.Load.ty = e->Iex.Load.ty;
Addr max_ga = 0;
Int n_tmps = bb->tyenv->types_used;
- UShort* uses = LibVEX_Alloc(n_tmps * sizeof(UShort));
+ UShort* uses = LibVEX_Alloc_inline(n_tmps * sizeof(UShort));
/* Phase 1. Scan forwards in bb, counting use occurrences of each
temp. Also count occurrences in the bb->next field. Take the
/* Make the tmp->expr environment, so we can use it for
chasing expressions. */
Int n_tmps = sb->tyenv->types_used;
- IRExpr** env = LibVEX_Alloc(n_tmps * sizeof(IRExpr*));
+ IRExpr** env = LibVEX_Alloc_inline(n_tmps * sizeof(IRExpr*));
for (i = 0; i < n_tmps; i++)
env[i] = NULL;
into memory, the rate falls by about a factor of 3.
*/
-/* Allocated memory as returned by LibVEX_Alloc will be aligned on this
- boundary. */
-#define REQ_ALIGN 8
-
#define N_TEMPORARY_BYTES 5000000
static HChar temporary[N_TEMPORARY_BYTES] __attribute__((aligned(REQ_ALIGN)));
static HChar* permanent_curr = &permanent[0];
static HChar* permanent_last = &permanent[N_PERMANENT_BYTES-1];
-static HChar* private_LibVEX_alloc_first = &temporary[0];
-static HChar* private_LibVEX_alloc_curr = &temporary[0];
-static HChar* private_LibVEX_alloc_last = &temporary[N_TEMPORARY_BYTES-1];
+HChar* private_LibVEX_alloc_first = &temporary[0];
+HChar* private_LibVEX_alloc_curr = &temporary[0];
+HChar* private_LibVEX_alloc_last = &temporary[N_TEMPORARY_BYTES-1];
static VexAllocMode mode = VexAllocModeTEMP;
}
__attribute__((noreturn))
-static void private_LibVEX_alloc_OOM(void)
+void private_LibVEX_alloc_OOM(void)
{
const HChar* pool = "???";
if (private_LibVEX_alloc_first == &temporary[0]) pool = "TEMP";
/* Exported to library client. */
-/* Allocate in Vex's temporary allocation area. Be careful with this.
- You can only call it inside an instrumentation or optimisation
- callback that you have previously specified in a call to
- LibVEX_Translate. The storage allocated will only stay alive until
- translation of the current basic block is complete.
- */
-
-void* LibVEX_Alloc ( SizeT nbytes )
-{
- struct align {
- char c;
- union {
- char c;
- short s;
- int i;
- long l;
- long long ll;
- float f;
- double d;
- /* long double is currently not used and would increase alignment
- unnecessarily. */
- /* long double ld; */
- void *pto;
- void (*ptf)(void);
- } x;
- };
-
- /* Make sure the compiler does no surprise us */
- vassert(offsetof(struct align,x) <= REQ_ALIGN);
-
-#if 0
- /* Nasty debugging hack, do not use. */
- return malloc(nbytes);
-#else
- HChar* curr;
- HChar* next;
- SizeT ALIGN;
- ALIGN = offsetof(struct align,x) - 1;
- nbytes = (nbytes + ALIGN) & ~ALIGN;
- curr = private_LibVEX_alloc_curr;
- next = curr + nbytes;
- if (next >= private_LibVEX_alloc_last)
- private_LibVEX_alloc_OOM();
- private_LibVEX_alloc_curr = next;
- return curr;
-#endif
-}
-
void LibVEX_ShowAllocStats ( void )
{
vex_printf("vex storage: T total %lld bytes allocated\n",
(Long)(permanent_curr - permanent_first) );
}
+void* LibVEX_Alloc ( SizeT nbytes )
+{
+ return LibVEX_Alloc_inline(nbytes);
+}
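[The hunk above keeps LibVEX_Alloc alive as a thin out-of-line wrapper
around LibVEX_Alloc_inline, so clients that link against the
LibVEX_Alloc symbol keep working while the 500+ internal callsites get
the inlined fast path. A minimal standalone sketch of that pattern,
using the hypothetical names twice/twice_inline rather than anything
from this patch:

#include <stdio.h>

/* In a header: the hot-path version, inlinable at every callsite. */
static inline int twice_inline(int x) { return 2 * x; }

/* In a .c file: thin out-of-line wrapper preserving the old symbol
   for external callers that cannot see the inline definition. */
int twice(int x)
{
   return twice_inline(x);
}

int main(void)
{
   printf("%d %d\n", twice_inline(21), twice(21)); /* prints 42 42 */
   return 0;
}
]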
/*---------------------------------------------------------*/
/*--- Bombing out ---*/
extern void vexSetAllocModeTEMP_and_clear ( void );
+/* Allocate in Vex's temporary allocation area. Be careful with this.
+ You can only call it inside an instrumentation or optimisation
+ callback that you have previously specified in a call to
+ LibVEX_Translate. The storage allocated will only stay alive until
+ translation of the current basic block is complete.
+ */
+extern HChar* private_LibVEX_alloc_first;
+extern HChar* private_LibVEX_alloc_curr;
+extern HChar* private_LibVEX_alloc_last;
+extern void private_LibVEX_alloc_OOM(void) __attribute__((noreturn));
+
+/* Allocated memory as returned by LibVEX_Alloc will be aligned on this
+ boundary. */
+#define REQ_ALIGN 8
+
+static inline void* LibVEX_Alloc_inline ( SizeT nbytes )
+{
+ struct align {
+ char c;
+ union {
+ char c;
+ short s;
+ int i;
+ long l;
+ long long ll;
+ float f;
+ double d;
+ /* long double is currently not used and would increase alignment
+ unnecessarily. */
+ /* long double ld; */
+ void *pto;
+ void (*ptf)(void);
+ } x;
+ };
+
+ /* Make sure the compiler does not surprise us */
+ vassert(offsetof(struct align,x) <= REQ_ALIGN);
+
+#if 0
+ /* Nasty debugging hack, do not use. */
+ return malloc(nbytes);
+#else
+ HChar* curr;
+ HChar* next;
+ SizeT ALIGN;
+ ALIGN = offsetof(struct align,x) - 1;
+ nbytes = (nbytes + ALIGN) & ~ALIGN;
+ curr = private_LibVEX_alloc_curr;
+ next = curr + nbytes;
+ if (next >= private_LibVEX_alloc_last)
+ private_LibVEX_alloc_OOM();
+ private_LibVEX_alloc_curr = next;
+ return curr;
+#endif
+}
+
#endif /* ndef __VEX_MAIN_UTIL_H */
/*---------------------------------------------------------------*/
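[Two details of LibVEX_Alloc_inline deserve spelling out: the struct
align/offsetof trick yields the strictest alignment required by any
scalar type on the host, and (nbytes + ALIGN) & ~ALIGN rounds a
request up to the next multiple of that alignment, which must be a
power of two for the mask to work. A small self-contained sketch of
both, independent of the patch:

#include <stdio.h>
#include <stddef.h>

/* Same layout trick as in LibVEX_Alloc_inline: the compiler pads the
   leading char up to the union's alignment, so offsetof(...,x) is the
   strictest scalar alignment on this host. */
struct align {
   char c;
   union { char c; short s; int i; long l; long long ll;
           float f; double d; void* pto; void (*ptf)(void); } x;
};

/* Round nbytes up to a multiple of align; align must be a power of
   two, so (align - 1) works as a bit mask. */
static size_t round_up ( size_t nbytes, size_t align )
{
   size_t mask = align - 1;
   return (nbytes + mask) & ~mask;
}

int main(void)
{
   size_t a = offsetof(struct align, x);
   printf("host alignment: %zu\n", a);   /* typically 8 */
   printf("%zu %zu %zu\n",
          round_up(1, 8),                /* -> 8  */
          round_up(8, 8),                /* -> 8  */
          round_up(9, 8));               /* -> 16 */
   return 0;
}
]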