From: Florian Krohm
Date: Fri, 13 Mar 2015 12:46:49 +0000 (+0000)
Subject: r2974 moved the inline definition of LibVEX_Alloc from libvex.h
X-Git-Tag: svn/VALGRIND_3_11_0^2~79
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f26b4d4918f7ea904e297d643c370550a3d308ac;p=thirdparty%2Fvalgrind.git

r2974 moved the inline definition of LibVEX_Alloc from libvex.h to
main_util.c because it caused linker problems with ICC; see the comments
in BZ #339542. This change re-enables inlining of that function by adding
it, renamed as LibVEX_Alloc_inline, to main_util.h. 500+ call sites
changed accordingly.

git-svn-id: svn://svn.valgrind.org/vex/trunk@3103
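The shape of the change, for readers who do not want to wade through the
500+ mechanical renames below: the allocator body lives in a private header
as a static inline function under the new name, while main_util.c keeps an
out-of-line LibVEX_Alloc for the public symbol declared in libvex.h. What
follows is a minimal sketch of that split, not VEX's actual code; the pool
variables, the 8-byte alignment, and the NULL-on-exhaustion policy are
illustrative assumptions.

    #include <stddef.h>

    /* main_util.h (sketch): visible to every priv/ translation unit, so
       hot call sites such as the instruction constructors below can
       inline the allocation fast path.  Assumes the pool pointers are
       set up at library initialisation. */
    extern unsigned char* priv_alloc_curr;   /* hypothetical pool cursor */
    extern unsigned char* priv_alloc_end;    /* hypothetical pool limit  */

    static inline void* LibVEX_Alloc_inline ( size_t nbytes )
    {
       /* Round the request up to 8-byte alignment, then bump the cursor.
          No per-object headers, no freeing: typical arena behaviour. */
       size_t rounded = (nbytes + 7) & ~(size_t)7;
       unsigned char* here = priv_alloc_curr;
       if (rounded > (size_t)(priv_alloc_end - here))
          return NULL;   /* illustrative; the real allocator fails hard */
       priv_alloc_curr = here + rounded;
       return here;
    }

    /* main_util.c (sketch): the out-of-line definition keeps the symbol
       that libvex.h declares, so the ICC link failure from BZ #339542
       stays fixed while internal callers get inlining back. */
    void* LibVEX_Alloc ( size_t nbytes )
    {
       return LibVEX_Alloc_inline(nbytes);
    }

Giving the inline variant a distinct name, rather than redefining
LibVEX_Alloc itself in the header, keeps the inline definition from
clashing with the public declaration in libvex.h, which is presumably why
every internal call site is renamed below rather than left alone.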
---

diff --git a/VEX/priv/host_amd64_defs.c b/VEX/priv/host_amd64_defs.c
index 97215a332d..3bf2ae7aa7 100644
--- a/VEX/priv/host_amd64_defs.c
+++ b/VEX/priv/host_amd64_defs.c
@@ -136,7 +136,7 @@ void getAllocableRegs_AMD64 ( Int* nregs, HReg** arr )
 {
 #if 0
    *nregs = 6;
-   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+   *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
    (*arr)[ 0] = hregAMD64_RSI();
    (*arr)[ 1] = hregAMD64_RDI();
    (*arr)[ 2] = hregAMD64_RBX();
@@ -147,7 +147,7 @@ void getAllocableRegs_AMD64 ( Int* nregs, HReg** arr )
 #endif
 #if 1
    *nregs = 20;
-   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+   *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
    (*arr)[ 0] = hregAMD64_RSI();
    (*arr)[ 1] = hregAMD64_RDI();
    (*arr)[ 2] = hregAMD64_R8();
@@ -203,14 +203,14 @@ const HChar* showAMD64CondCode ( AMD64CondCode cond )
 /* --------- AMD64AMode: memory address expressions. --------- */
 AMD64AMode* AMD64AMode_IR ( UInt imm32, HReg reg ) {
-   AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
+   AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
    am->tag = Aam_IR;
    am->Aam.IR.imm = imm32;
    am->Aam.IR.reg = reg;
    return am;
 }
 AMD64AMode* AMD64AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
-   AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
+   AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
    am->tag = Aam_IRRS;
    am->Aam.IRRS.imm = imm32;
    am->Aam.IRRS.base = base;
@@ -273,19 +273,19 @@ static void mapRegs_AMD64AMode ( HRegRemap* m, AMD64AMode* am ) {
 /* --------- Operand, which can be reg, immediate or memory. --------- */
 AMD64RMI* AMD64RMI_Imm ( UInt imm32 ) {
-   AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+   AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
    op->tag = Armi_Imm;
    op->Armi.Imm.imm32 = imm32;
    return op;
 }
 AMD64RMI* AMD64RMI_Reg ( HReg reg ) {
-   AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+   AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
    op->tag = Armi_Reg;
    op->Armi.Reg.reg = reg;
    return op;
 }
 AMD64RMI* AMD64RMI_Mem ( AMD64AMode* am ) {
-   AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+   AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
    op->tag = Armi_Mem;
    op->Armi.Mem.am = am;
    return op;
@@ -353,13 +353,13 @@ static void mapRegs_AMD64RMI ( HRegRemap* m, AMD64RMI* op ) {
 /* --------- Operand, which can be reg or immediate only. --------- */
 AMD64RI* AMD64RI_Imm ( UInt imm32 ) {
-   AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
+   AMD64RI* op = LibVEX_Alloc_inline(sizeof(AMD64RI));
    op->tag = Ari_Imm;
    op->Ari.Imm.imm32 = imm32;
    return op;
 }
 AMD64RI* AMD64RI_Reg ( HReg reg ) {
-   AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
+   AMD64RI* op = LibVEX_Alloc_inline(sizeof(AMD64RI));
    op->tag = Ari_Reg;
    op->Ari.Reg.reg = reg;
    return op;
@@ -409,13 +409,13 @@ static void mapRegs_AMD64RI ( HRegRemap* m, AMD64RI* op ) {
 /* --------- Operand, which can be reg or memory only. --------- */
 AMD64RM* AMD64RM_Reg ( HReg reg ) {
-   AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
+   AMD64RM* op = LibVEX_Alloc_inline(sizeof(AMD64RM));
    op->tag = Arm_Reg;
    op->Arm.Reg.reg = reg;
    return op;
 }
 AMD64RM* AMD64RM_Mem ( AMD64AMode* am ) {
-   AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
+   AMD64RM* op = LibVEX_Alloc_inline(sizeof(AMD64RM));
    op->tag = Arm_Mem;
    op->Arm.Mem.am = am;
    return op;
@@ -606,14 +606,14 @@ const HChar* showAMD64SseOp ( AMD64SseOp op ) {
 }
 AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Imm64;
    i->Ain.Imm64.imm64 = imm64;
    i->Ain.Imm64.dst = dst;
    return i;
 }
 AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Alu64R;
    i->Ain.Alu64R.op = op;
    i->Ain.Alu64R.src = src;
@@ -621,7 +621,7 @@ AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Alu64M;
    i->Ain.Alu64M.op = op;
    i->Ain.Alu64M.src = src;
@@ -630,7 +630,7 @@ AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Sh64;
    i->Ain.Sh64.op = op;
    i->Ain.Sh64.src = src;
@@ -638,28 +638,28 @@ AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Test64;
    i->Ain.Test64.imm32 = imm32;
    i->Ain.Test64.dst = dst;
    return i;
 }
 AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Unary64;
    i->Ain.Unary64.op = op;
    i->Ain.Unary64.dst = dst;
    return i;
 }
 AMD64Instr* AMD64Instr_Lea64 ( AMD64AMode* am, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Lea64;
    i->Ain.Lea64.am = am;
    i->Ain.Lea64.dst = dst;
    return i;
 }
 AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Alu32R;
    i->Ain.Alu32R.op = op;
    i->Ain.Alu32R.src = src;
@@ -672,14 +672,14 @@ AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_MulL;
    i->Ain.MulL.syned = syned;
    i->Ain.MulL.src = src;
    return i;
 }
 AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Div;
    i->Ain.Div.syned = syned;
    i->Ain.Div.sz = sz;
@@ -688,14 +688,14 @@ AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Push( AMD64RMI* src ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Push;
    i->Ain.Push.src = src;
    return i;
 }
 AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms,
                               RetLoc rloc ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Call;
    i->Ain.Call.cond = cond;
    i->Ain.Call.target = target;
@@ -708,7 +708,7 @@ AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms,
 AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
                                  AMD64CondCode cond, Bool toFastEP ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_XDirect;
    i->Ain.XDirect.dstGA = dstGA;
    i->Ain.XDirect.amRIP = amRIP;
@@ -718,7 +718,7 @@ AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
 }
 AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
                                 AMD64CondCode cond ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_XIndir;
    i->Ain.XIndir.dstGA = dstGA;
    i->Ain.XIndir.amRIP = amRIP;
@@ -727,7 +727,7 @@ AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
 }
 AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
                                    AMD64CondCode cond, IRJumpKind jk ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_XAssisted;
    i->Ain.XAssisted.dstGA = dstGA;
    i->Ain.XAssisted.amRIP = amRIP;
@@ -737,7 +737,7 @@ AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
 }
 AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_CMov64;
    i->Ain.CMov64.cond = cond;
    i->Ain.CMov64.src = src;
@@ -747,7 +747,7 @@ AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
 }
 AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
                                AMD64AMode* addr, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_CLoad;
    i->Ain.CLoad.cond = cond;
    i->Ain.CLoad.szB = szB;
@@ -758,7 +758,7 @@ AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
 }
 AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
                                 HReg src, AMD64AMode* addr ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_CStore;
    i->Ain.CStore.cond = cond;
    i->Ain.CStore.szB = szB;
@@ -768,7 +768,7 @@ AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
    return i;
 }
 AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_MovxLQ;
    i->Ain.MovxLQ.syned = syned;
    i->Ain.MovxLQ.src = src;
@@ -777,7 +777,7 @@ AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst ) {
 }
 AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
                                 AMD64AMode* src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_LoadEX;
    i->Ain.LoadEX.szSmall = szSmall;
    i->Ain.LoadEX.syned = syned;
@@ -787,7 +787,7 @@ AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
    return i;
 }
 AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Store;
    i->Ain.Store.sz = sz;
    i->Ain.Store.src = src;
@@ -796,14 +796,14 @@ AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Set64;
    i->Ain.Set64.cond = cond;
    i->Ain.Set64.dst = dst;
    return i;
 }
 AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Bsfr64;
    i->Ain.Bsfr64.isFwds = isFwds;
    i->Ain.Bsfr64.src = src;
@@ -811,12 +811,12 @@ AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_MFence ( void ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_MFence;
    return i;
 }
 AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_ACAS;
    i->Ain.ACAS.addr = addr;
    i->Ain.ACAS.sz = sz;
@@ -824,7 +824,7 @@ AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz ) {
    return i;
 }
 AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_DACAS;
    i->Ain.DACAS.addr = addr;
    i->Ain.DACAS.sz = sz;
@@ -834,7 +834,7 @@ AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz ) {
 AMD64Instr* AMD64Instr_A87Free ( Int nregs )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_A87Free;
    i->Ain.A87Free.nregs = nregs;
    vassert(nregs >= 1 && nregs <= 7);
@@ -842,7 +842,7 @@ AMD64Instr* AMD64Instr_A87Free ( Int nregs )
 }
 AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_A87PushPop;
    i->Ain.A87PushPop.addr = addr;
    i->Ain.A87PushPop.isPush = isPush;
@@ -852,33 +852,33 @@ AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB )
 }
 AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_A87FpOp;
    i->Ain.A87FpOp.op = op;
    return i;
 }
 AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_A87LdCW;
    i->Ain.A87LdCW.addr = addr;
    return i;
 }
 AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_A87StSW;
    i->Ain.A87StSW.addr = addr;
    return i;
 }
 AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* addr )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_LdMXCSR;
    i->Ain.LdMXCSR.addr = addr;
    return i;
 }
 AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseUComIS;
    i->Ain.SseUComIS.sz = toUChar(sz);
    i->Ain.SseUComIS.srcL = srcL;
@@ -888,7 +888,7 @@ AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseSI2SF;
    i->Ain.SseSI2SF.szS = toUChar(szS);
    i->Ain.SseSI2SF.szD = toUChar(szD);
@@ -899,7 +899,7 @@ AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseSF2SI;
    i->Ain.SseSF2SI.szS = toUChar(szS);
    i->Ain.SseSF2SI.szD = toUChar(szD);
@@ -911,7 +911,7 @@ AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst ) {
 }
 AMD64Instr* AMD64Instr_SseSDSS ( Bool from64, HReg src, HReg dst )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseSDSS;
    i->Ain.SseSDSS.from64 = from64;
    i->Ain.SseSDSS.src = src;
@@ -920,7 +920,7 @@ AMD64Instr* AMD64Instr_SseSDSS ( Bool from64, HReg src, HReg dst )
 }
 AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz,
                                  HReg reg, AMD64AMode* addr ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseLdSt;
    i->Ain.SseLdSt.isLoad = isLoad;
    i->Ain.SseLdSt.sz = toUChar(sz);
@@ -931,7 +931,7 @@ AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz,
 }
 AMD64Instr* AMD64Instr_SseLdzLO ( Int sz, HReg reg, AMD64AMode* addr )
 {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseLdzLO;
    i->Ain.SseLdzLO.sz = sz;
    i->Ain.SseLdzLO.reg = reg;
@@ -940,7 +940,7 @@ AMD64Instr* AMD64Instr_SseLdzLO ( Int sz, HReg reg, AMD64AMode* addr )
    return i;
 }
 AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Sse32Fx4;
    i->Ain.Sse32Fx4.op = op;
    i->Ain.Sse32Fx4.src = src;
@@ -949,7 +949,7 @@ AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Sse32FLo;
    i->Ain.Sse32FLo.op = op;
    i->Ain.Sse32FLo.src = src;
@@ -958,7 +958,7 @@ AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Sse64Fx2;
    i->Ain.Sse64Fx2.op = op;
    i->Ain.Sse64Fx2.src = src;
@@ -967,7 +967,7 @@ AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_Sse64FLo;
    i->Ain.Sse64FLo.op = op;
    i->Ain.Sse64FLo.src = src;
@@ -976,7 +976,7 @@ AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseReRg;
    i->Ain.SseReRg.op = op;
    i->Ain.SseReRg.src = re;
@@ -984,7 +984,7 @@ AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {
    return i;
 }
 AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode cond, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseCMov;
    i->Ain.SseCMov.cond = cond;
    i->Ain.SseCMov.src = src;
@@ -993,7 +993,7 @@ AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode cond, HReg src, HReg dst ) {
    return i;
 }
 AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_SseShuf;
    i->Ain.SseShuf.order = order;
    i->Ain.SseShuf.src = src;
@@ -1003,7 +1003,7 @@ AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
 //uu AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad,
 //uu                                  HReg reg, AMD64AMode* addr ) {
-//uu    AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+//uu    AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
 //uu    i->tag = Ain_AvxLdSt;
 //uu    i->Ain.AvxLdSt.isLoad = isLoad;
 //uu    i->Ain.AvxLdSt.reg = reg;
@@ -1011,7 +1011,7 @@ AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
 //uu    return i;
 //uu }
 //uu AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp op, HReg re, HReg rg ) {
-//uu    AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+//uu    AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
 //uu    i->tag = Ain_AvxReRg;
 //uu    i->Ain.AvxReRg.op = op;
 //uu    i->Ain.AvxReRg.src = re;
@@ -1020,14 +1020,14 @@ AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
 //uu }
 AMD64Instr* AMD64Instr_EvCheck ( AMD64AMode* amCounter,
                                  AMD64AMode* amFailAddr ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_EvCheck;
    i->Ain.EvCheck.amCounter = amCounter;
    i->Ain.EvCheck.amFailAddr = amFailAddr;
    return i;
 }
 AMD64Instr* AMD64Instr_ProfInc ( void ) {
-   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
    i->tag = Ain_ProfInc;
    return i;
 }
diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c
index 864603719a..6f9e125bcd 100644
--- a/VEX/priv/host_amd64_isel.c
+++ b/VEX/priv/host_amd64_isel.c
@@ -4923,7 +4923,7 @@ HInstrArray* iselSB_AMD64 ( const IRSB* bb,
    vassert(archinfo_host->endness == VexEndnessLE);
    /* Make up an initial environment to use. */
-   env = LibVEX_Alloc(sizeof(ISelEnv));
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
    env->vreg_ctr = 0;
    /* Set up output code array. */
@@ -4935,8 +4935,8 @@ HInstrArray* iselSB_AMD64 ( const IRSB* bb,
    /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
       change as we go along. */
    env->n_vregmap = bb->tyenv->types_used;
-   env->vregmap   = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
-   env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
    /* and finally ... */
    env->chainingAllowed = chainingAllowed;
diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c
index 7cc0910a12..fbfca3ceb3 100644
--- a/VEX/priv/host_arm64_defs.c
+++ b/VEX/priv/host_arm64_defs.c
@@ -124,7 +124,7 @@ void getAllocableRegs_ARM64 ( Int* nregs, HReg** arr )
 {
    Int i = 0;
    *nregs = 26;
-   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+   *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
    // callee saves ones (22 to 28) are listed first, since we prefer
    // them if they're available
@@ -222,7 +222,7 @@ static const HChar* showARM64CondCode ( ARM64CondCode cond ) {
 /* --------- Memory address expressions (amodes). --------- */
 ARM64AMode* ARM64AMode_RI9 ( HReg reg, Int simm9 ) {
-   ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+   ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
    am->tag = ARM64am_RI9;
    am->ARM64am.RI9.reg = reg;
    am->ARM64am.RI9.simm9 = simm9;
@@ -231,7 +231,7 @@ ARM64AMode* ARM64AMode_RI9 ( HReg reg, Int simm9 ) {
 }
 ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB ) {
-   ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+   ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
    am->tag = ARM64am_RI12;
    am->ARM64am.RI12.reg = reg;
    am->ARM64am.RI12.uimm12 = uimm12;
@@ -245,7 +245,7 @@ ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB ) {
 }
 ARM64AMode* ARM64AMode_RR ( HReg base, HReg index ) {
-   ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+   ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
    am->tag = ARM64am_RR;
    am->ARM64am.RR.base = base;
    am->ARM64am.RR.index = index;
@@ -315,7 +315,7 @@ static void mapRegs_ARM64AMode ( HRegRemap* m, ARM64AMode* am ) {
 /* --------- Reg or uimm12<<{0,12} operands --------- */
 ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
-   ARM64RIA* riA = LibVEX_Alloc(sizeof(ARM64RIA));
+   ARM64RIA* riA = LibVEX_Alloc_inline(sizeof(ARM64RIA));
    riA->tag = ARM64riA_I12;
    riA->ARM64riA.I12.imm12 = imm12;
    riA->ARM64riA.I12.shift = shift;
@@ -324,7 +324,7 @@ ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
    return riA;
 }
 ARM64RIA* ARM64RIA_R ( HReg reg ) {
-   ARM64RIA* riA = LibVEX_Alloc(sizeof(ARM64RIA));
+   ARM64RIA* riA = LibVEX_Alloc_inline(sizeof(ARM64RIA));
    riA->tag = ARM64riA_R;
    riA->ARM64riA.R.reg = reg;
    return riA;
@@ -372,7 +372,7 @@ static void mapRegs_ARM64RIA ( HRegRemap* m, ARM64RIA* riA ) {
 /* --------- Reg or "bitfield" (logic immediate) operands --------- */
 ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS ) {
-   ARM64RIL* riL = LibVEX_Alloc(sizeof(ARM64RIL));
+   ARM64RIL* riL = LibVEX_Alloc_inline(sizeof(ARM64RIL));
    riL->tag = ARM64riL_I13;
    riL->ARM64riL.I13.bitN = bitN;
    riL->ARM64riL.I13.immR = immR;
@@ -383,7 +383,7 @@ ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS ) {
    return riL;
 }
 ARM64RIL* ARM64RIL_R ( HReg reg ) {
-   ARM64RIL* riL = LibVEX_Alloc(sizeof(ARM64RIL));
+   ARM64RIL* riL = LibVEX_Alloc_inline(sizeof(ARM64RIL));
    riL->tag = ARM64riL_R;
    riL->ARM64riL.R.reg = reg;
    return riL;
@@ -433,14 +433,14 @@ static void mapRegs_ARM64RIL ( HRegRemap* m, ARM64RIL* riL ) {
 /* --------------- Reg or uimm6 operands --------------- */
 ARM64RI6* ARM64RI6_I6 ( UInt imm6 ) {
-   ARM64RI6* ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
+   ARM64RI6* ri6 = LibVEX_Alloc_inline(sizeof(ARM64RI6));
    ri6->tag = ARM64ri6_I6;
    ri6->ARM64ri6.I6.imm6 = imm6;
    vassert(imm6 > 0 && imm6 < 64);
    return ri6;
 }
 ARM64RI6* ARM64RI6_R ( HReg reg ) {
-   ARM64RI6* ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
+   ARM64RI6* ri6 = LibVEX_Alloc_inline(sizeof(ARM64RI6));
    ri6->tag = ARM64ri6_R;
    ri6->ARM64ri6.R.reg = reg;
    return ri6;
@@ -828,7 +828,7 @@ static const HChar* showARM64VecNarrowOp(ARM64VecNarrowOp op) {
 ARM64Instr* ARM64Instr_Arith ( HReg dst,
                                HReg argL, ARM64RIA* argR, Bool isAdd ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Arith;
    i->ARM64in.Arith.dst = dst;
    i->ARM64in.Arith.argL = argL;
@@ -837,7 +837,7 @@ ARM64Instr* ARM64Instr_Arith ( HReg dst,
    return i;
 }
 ARM64Instr* ARM64Instr_Cmp ( HReg argL, ARM64RIA* argR, Bool is64 ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Cmp;
    i->ARM64in.Cmp.argL = argL;
    i->ARM64in.Cmp.argR = argR;
@@ -846,7 +846,7 @@ ARM64Instr* ARM64Instr_Cmp ( HReg argL, ARM64RIA* argR, Bool is64 ) {
 }
 ARM64Instr* ARM64Instr_Logic ( HReg dst,
                                HReg argL, ARM64RIL* argR, ARM64LogicOp op ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Logic;
    i->ARM64in.Logic.dst = dst;
    i->ARM64in.Logic.argL = argL;
@@ -855,7 +855,7 @@ ARM64Instr* ARM64Instr_Logic ( HReg dst,
    return i;
 }
 ARM64Instr* ARM64Instr_Test ( HReg argL, ARM64RIL* argR ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Test;
    i->ARM64in.Test.argL = argL;
    i->ARM64in.Test.argR = argR;
@@ -863,7 +863,7 @@ ARM64Instr* ARM64Instr_Test ( HReg argL, ARM64RIL* argR ) {
 }
 ARM64Instr* ARM64Instr_Shift ( HReg dst,
                                HReg argL, ARM64RI6* argR, ARM64ShiftOp op ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Shift;
    i->ARM64in.Shift.dst = dst;
    i->ARM64in.Shift.argL = argL;
@@ -872,7 +872,7 @@ ARM64Instr* ARM64Instr_Shift ( HReg dst,
    return i;
 }
 ARM64Instr* ARM64Instr_Unary ( HReg dst, HReg src, ARM64UnaryOp op ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Unary;
    i->ARM64in.Unary.dst = dst;
    i->ARM64in.Unary.src = src;
@@ -880,7 +880,7 @@ ARM64Instr* ARM64Instr_Unary ( HReg dst, HReg src, ARM64UnaryOp op ) {
    return i;
 }
 ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_MovI;
    i->ARM64in.MovI.dst = dst;
    i->ARM64in.MovI.src = src;
@@ -889,14 +889,14 @@ ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
    return i;
 }
 ARM64Instr* ARM64Instr_Imm64 ( HReg dst, ULong imm64 ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Imm64;
    i->ARM64in.Imm64.dst = dst;
    i->ARM64in.Imm64.imm64 = imm64;
    return i;
 }
 ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_LdSt64;
    i->ARM64in.LdSt64.isLoad = isLoad;
    i->ARM64in.LdSt64.rD = rD;
@@ -904,7 +904,7 @@ ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
    return i;
 }
 ARM64Instr* ARM64Instr_LdSt32 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_LdSt32;
    i->ARM64in.LdSt32.isLoad = isLoad;
    i->ARM64in.LdSt32.rD = rD;
@@ -912,7 +912,7 @@ ARM64Instr* ARM64Instr_LdSt32 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
    return i;
 }
 ARM64Instr* ARM64Instr_LdSt16 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_LdSt16;
    i->ARM64in.LdSt16.isLoad = isLoad;
    i->ARM64in.LdSt16.rD = rD;
@@ -920,7 +920,7 @@ ARM64Instr* ARM64Instr_LdSt16 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
    return i;
 }
 ARM64Instr* ARM64Instr_LdSt8 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_LdSt8;
    i->ARM64in.LdSt8.isLoad = isLoad;
    i->ARM64in.LdSt8.rD = rD;
@@ -929,7 +929,7 @@ ARM64Instr* ARM64Instr_LdSt8 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
 }
 ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
                                  ARM64CondCode cond, Bool toFastEP ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_XDirect;
    i->ARM64in.XDirect.dstGA = dstGA;
    i->ARM64in.XDirect.amPC = amPC;
@@ -939,7 +939,7 @@ ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
 }
 ARM64Instr* ARM64Instr_XIndir ( HReg dstGA, ARM64AMode* amPC,
                                 ARM64CondCode cond ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_XIndir;
    i->ARM64in.XIndir.dstGA = dstGA;
    i->ARM64in.XIndir.amPC = amPC;
@@ -948,7 +948,7 @@ ARM64Instr* ARM64Instr_XIndir ( HReg dstGA, ARM64AMode* amPC,
 }
 ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
                                    ARM64CondCode cond, IRJumpKind jk ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_XAssisted;
    i->ARM64in.XAssisted.dstGA = dstGA;
    i->ARM64in.XAssisted.amPC = amPC;
@@ -958,7 +958,7 @@ ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
 }
 ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
                               ARM64CondCode cond ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_CSel;
    i->ARM64in.CSel.dst = dst;
    i->ARM64in.CSel.argL = argL;
@@ -968,7 +968,7 @@ ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
 }
 ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, Addr64 target, Int nArgRegs,
                               RetLoc rloc ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Call;
    i->ARM64in.Call.cond = cond;
    i->ARM64in.Call.target = target;
@@ -978,7 +978,7 @@ ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, Addr64 target, Int nArgRegs,
    return i;
 }
 extern ARM64Instr* ARM64Instr_AddToSP ( Int simm ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_AddToSP;
    i->ARM64in.AddToSP.simm = simm;
    vassert(-4096 < simm && simm < 4096);
@@ -986,14 +986,14 @@ extern ARM64Instr* ARM64Instr_AddToSP ( Int simm ) {
    return i;
 }
 extern ARM64Instr* ARM64Instr_FromSP ( HReg dst ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_FromSP;
    i->ARM64in.FromSP.dst = dst;
    return i;
 }
 ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR,
                              ARM64MulOp op ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_Mul;
    i->ARM64in.Mul.dst = dst;
    i->ARM64in.Mul.argL = argL;
@@ -1002,26 +1002,26 @@ ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR,
    return i;
 }
 ARM64Instr* ARM64Instr_LdrEX ( Int szB ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_LdrEX;
    i->ARM64in.LdrEX.szB = szB;
    vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
    return i;
 }
 ARM64Instr* ARM64Instr_StrEX ( Int szB ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_StrEX;
    i->ARM64in.StrEX.szB = szB;
    vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
    return i;
 }
 ARM64Instr* ARM64Instr_MFence ( void ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_MFence;
    return i;
 }
 ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VLdStS;
    i->ARM64in.VLdStS.isLoad = isLoad;
    i->ARM64in.VLdStS.sD = sD;
@@ -1031,7 +1031,7 @@ ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VLdStD ( Bool isLoad, HReg dD, HReg rN, UInt uimm12 ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VLdStD;
    i->ARM64in.VLdStD.isLoad = isLoad;
    i->ARM64in.VLdStD.dD = dD;
@@ -1041,7 +1041,7 @@ ARM64Instr* ARM64Instr_VLdStD ( Bool isLoad, HReg dD, HReg rN, UInt uimm12 ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VLdStQ ( Bool isLoad, HReg rQ, HReg rN ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VLdStQ;
    i->ARM64in.VLdStQ.isLoad = isLoad;
    i->ARM64in.VLdStQ.rQ = rQ;
@@ -1049,7 +1049,7 @@ ARM64Instr* ARM64Instr_VLdStQ ( Bool isLoad, HReg rQ, HReg rN ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VCvtI2F;
    i->ARM64in.VCvtI2F.how = how;
    i->ARM64in.VCvtI2F.rD = rD;
@@ -1058,7 +1058,7 @@ ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS ) {
 }
 ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
                                  UChar armRM ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VCvtF2I;
    i->ARM64in.VCvtF2I.how = how;
    i->ARM64in.VCvtF2I.rD = rD;
@@ -1068,7 +1068,7 @@ ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
    return i;
 }
 ARM64Instr* ARM64Instr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VCvtSD;
    i->ARM64in.VCvtSD.sToD = sToD;
    i->ARM64in.VCvtSD.dst = dst;
@@ -1076,7 +1076,7 @@ ARM64Instr* ARM64Instr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VUnaryD;
    i->ARM64in.VUnaryD.op = op;
    i->ARM64in.VUnaryD.dst = dst;
@@ -1084,7 +1084,7 @@ ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VUnaryS;
    i->ARM64in.VUnaryS.op = op;
    i->ARM64in.VUnaryS.dst = dst;
@@ -1093,7 +1093,7 @@ ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
 }
 ARM64Instr* ARM64Instr_VBinD ( ARM64FpBinOp op,
                                HReg dst, HReg argL, HReg argR ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VBinD;
    i->ARM64in.VBinD.op = op;
    i->ARM64in.VBinD.dst = dst;
@@ -1103,7 +1103,7 @@ ARM64Instr* ARM64Instr_VBinD ( ARM64FpBinOp op,
 }
 ARM64Instr* ARM64Instr_VBinS ( ARM64FpBinOp op,
                                HReg dst, HReg argL, HReg argR ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VBinS;
    i->ARM64in.VBinS.op = op;
    i->ARM64in.VBinS.dst = dst;
@@ -1112,14 +1112,14 @@ ARM64Instr* ARM64Instr_VBinS ( ARM64FpBinOp op,
    return i;
 }
 ARM64Instr* ARM64Instr_VCmpD ( HReg argL, HReg argR ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VCmpD;
    i->ARM64in.VCmpD.argL = argL;
    i->ARM64in.VCmpD.argR = argR;
    return i;
 }
 ARM64Instr* ARM64Instr_VCmpS ( HReg argL, HReg argR ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VCmpS;
    i->ARM64in.VCmpS.argL = argL;
    i->ARM64in.VCmpS.argR = argR;
@@ -1127,7 +1127,7 @@ ARM64Instr* ARM64Instr_VCmpS ( HReg argL, HReg argR ) {
 }
 ARM64Instr* ARM64Instr_VFCSel ( HReg dst, HReg argL, HReg argR,
                                 ARM64CondCode cond, Bool isD ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VFCSel;
    i->ARM64in.VFCSel.dst = dst;
    i->ARM64in.VFCSel.argL = argL;
@@ -1137,14 +1137,14 @@ ARM64Instr* ARM64Instr_VFCSel ( HReg dst, HReg argL, HReg argR,
    return i;
 }
 ARM64Instr* ARM64Instr_FPCR ( Bool toFPCR, HReg iReg ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_FPCR;
    i->ARM64in.FPCR.toFPCR = toFPCR;
    i->ARM64in.FPCR.iReg = iReg;
    return i;
 }
 ARM64Instr* ARM64Instr_FPSR ( Bool toFPSR, HReg iReg ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_FPSR;
    i->ARM64in.FPSR.toFPSR = toFPSR;
    i->ARM64in.FPSR.iReg = iReg;
@@ -1152,7 +1152,7 @@ ARM64Instr* ARM64Instr_FPSR ( Bool toFPSR, HReg iReg ) {
 }
 ARM64Instr* ARM64Instr_VBinV ( ARM64VecBinOp op,
                                HReg dst, HReg argL, HReg argR ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VBinV;
    i->ARM64in.VBinV.op = op;
    i->ARM64in.VBinV.dst = dst;
@@ -1161,7 +1161,7 @@ ARM64Instr* ARM64Instr_VBinV ( ARM64VecBinOp op,
    return i;
 }
 ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp op, HReg mod, HReg arg ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VModifyV;
    i->ARM64in.VModifyV.op = op;
    i->ARM64in.VModifyV.mod = mod;
@@ -1169,7 +1169,7 @@ ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp op, HReg mod, HReg arg ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg dst, HReg arg ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VUnaryV;
    i->ARM64in.VUnaryV.op = op;
    i->ARM64in.VUnaryV.dst = dst;
@@ -1178,7 +1178,7 @@ ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg dst, HReg arg ) {
 }
 ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op,
                                   UInt dszBlg2, HReg dst, HReg src ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VNarrowV;
    i->ARM64in.VNarrowV.op = op;
    i->ARM64in.VNarrowV.dszBlg2 = dszBlg2;
@@ -1189,7 +1189,7 @@ ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op,
 }
 ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
                                     HReg dst, HReg src, UInt amt ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VShiftImmV;
    i->ARM64in.VShiftImmV.op = op;
    i->ARM64in.VShiftImmV.dst = dst;
@@ -1245,7 +1245,7 @@ ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
    return i;
 }
 ARM64Instr* ARM64Instr_VExtV ( HReg dst, HReg srcLo, HReg srcHi, UInt amtB ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VExtV;
    i->ARM64in.VExtV.dst = dst;
    i->ARM64in.VExtV.srcLo = srcLo;
@@ -1255,7 +1255,7 @@ ARM64Instr* ARM64Instr_VExtV ( HReg dst, HReg srcLo, HReg srcHi, UInt amtB ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VImmQ (HReg rQ, UShort imm) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VImmQ;
    i->ARM64in.VImmQ.rQ = rQ;
    i->ARM64in.VImmQ.imm = imm;
@@ -1270,21 +1270,21 @@ ARM64Instr* ARM64Instr_VImmQ (HReg rQ, UShort imm) {
    return i;
 }
 ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VDfromX;
    i->ARM64in.VDfromX.rD = rD;
    i->ARM64in.VDfromX.rX = rX;
    return i;
 }
 ARM64Instr* ARM64Instr_VQfromX ( HReg rQ, HReg rXlo ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VQfromX;
    i->ARM64in.VQfromX.rQ = rQ;
    i->ARM64in.VQfromX.rXlo = rXlo;
    return i;
 }
 ARM64Instr* ARM64Instr_VQfromXX ( HReg rQ, HReg rXhi, HReg rXlo ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VQfromXX;
    i->ARM64in.VQfromXX.rQ = rQ;
    i->ARM64in.VQfromXX.rXhi = rXhi;
@@ -1292,7 +1292,7 @@ ARM64Instr* ARM64Instr_VQfromXX ( HReg rQ, HReg rXhi, HReg rXlo ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VXfromQ;
    i->ARM64in.VXfromQ.rX = rX;
    i->ARM64in.VXfromQ.rQ = rQ;
@@ -1301,7 +1301,7 @@ ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VXfromDorS;
    i->ARM64in.VXfromDorS.rX = rX;
    i->ARM64in.VXfromDorS.rDorS = rDorS;
@@ -1309,7 +1309,7 @@ ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD ) {
    return i;
 }
 ARM64Instr* ARM64Instr_VMov ( UInt szB, HReg dst, HReg src ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_VMov;
    i->ARM64in.VMov.szB = szB;
    i->ARM64in.VMov.dst = dst;
@@ -1330,14 +1330,14 @@ ARM64Instr* ARM64Instr_VMov ( UInt szB, HReg dst, HReg src ) {
 }
 ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
                                  ARM64AMode* amFailAddr ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_EvCheck;
    i->ARM64in.EvCheck.amCounter = amCounter;
    i->ARM64in.EvCheck.amFailAddr = amFailAddr;
    return i;
 }
 ARM64Instr* ARM64Instr_ProfInc ( void ) {
-   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
    i->tag = ARM64in_ProfInc;
    return i;
 }
diff --git a/VEX/priv/host_arm64_isel.c b/VEX/priv/host_arm64_isel.c
index afde38f610..669939af21 100644
--- a/VEX/priv/host_arm64_isel.c
+++ b/VEX/priv/host_arm64_isel.c
@@ -3930,7 +3930,7 @@ HInstrArray* iselSB_ARM64 ( const IRSB* bb,
    vassert(sizeof(ARM64Instr) <= 32);
    /* Make up an initial environment to use. */
-   env = LibVEX_Alloc(sizeof(ISelEnv));
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
    env->vreg_ctr = 0;
    /* Set up output code array. */
@@ -3942,8 +3942,8 @@ HInstrArray* iselSB_ARM64 ( const IRSB* bb,
    /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
       change as we go along. */
    env->n_vregmap = bb->tyenv->types_used;
-   env->vregmap   = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
-   env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
    /* and finally ... */
    env->chainingAllowed = chainingAllowed;
diff --git a/VEX/priv/host_arm_defs.c b/VEX/priv/host_arm_defs.c
index 9fa93c8c0b..bd2e9d38d9 100644
--- a/VEX/priv/host_arm_defs.c
+++ b/VEX/priv/host_arm_defs.c
@@ -123,7 +123,7 @@ void getAllocableRegs_ARM ( Int* nregs, HReg** arr )
 {
    Int i = 0;
    *nregs = 26;
-   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+   *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
    // callee saves ones are listed first, since we prefer them
    // if they're available
    (*arr)[i++] = hregARM_R4();
@@ -217,7 +217,7 @@ const HChar* showARMCondCode ( ARMCondCode cond ) {
 /* --------- Mem AModes: Addressing Mode 1 --------- */
 ARMAMode1* ARMAMode1_RI ( HReg reg, Int simm13 ) {
-   ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
+   ARMAMode1* am = LibVEX_Alloc_inline(sizeof(ARMAMode1));
    am->tag = ARMam1_RI;
    am->ARMam1.RI.reg = reg;
    am->ARMam1.RI.simm13 = simm13;
@@ -225,7 +225,7 @@ ARMAMode1* ARMAMode1_RI ( HReg reg, Int simm13 ) {
    return am;
 }
 ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift ) {
-   ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
+   ARMAMode1* am = LibVEX_Alloc_inline(sizeof(ARMAMode1));
    am->tag = ARMam1_RRS;
    am->ARMam1.RRS.base = base;
    am->ARMam1.RRS.index = index;
@@ -285,7 +285,7 @@ static void mapRegs_ARMAMode1 ( HRegRemap* m, ARMAMode1* am ) {
 /* --------- Mem AModes: Addressing Mode 2 --------- */
 ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
-   ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
+   ARMAMode2* am = LibVEX_Alloc_inline(sizeof(ARMAMode2));
    am->tag = ARMam2_RI;
    am->ARMam2.RI.reg = reg;
    am->ARMam2.RI.simm9 = simm9;
@@ -293,7 +293,7 @@ ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
    return am;
 }
 ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
-   ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
+   ARMAMode2* am = LibVEX_Alloc_inline(sizeof(ARMAMode2));
    am->tag = ARMam2_RR;
    am->ARMam2.RR.base = base;
    am->ARMam2.RR.index = index;
@@ -351,7 +351,7 @@ static void mapRegs_ARMAMode2 ( HRegRemap* m, ARMAMode2* am ) {
 /* --------- Mem AModes: Addressing Mode VFP --------- */
 ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
-   ARMAModeV* am = LibVEX_Alloc(sizeof(ARMAModeV));
+   ARMAModeV* am = LibVEX_Alloc_inline(sizeof(ARMAModeV));
    vassert(simm11 >= -1020 && simm11 <= 1020);
    vassert(0 == (simm11 & 3));
    am->reg = reg;
@@ -377,7 +377,7 @@ static void mapRegs_ARMAModeV ( HRegRemap* m, ARMAModeV* am ) {
 /* --------- Mem AModes: Addressing Mode Neon ------- */
 ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
-   ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
+   ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
    am->tag = ARMamN_RR;
    am->ARMamN.RR.rN = rN;
    am->ARMamN.RR.rM = rM;
@@ -385,7 +385,7 @@ ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
 }
 ARMAModeN *mkARMAModeN_R ( HReg rN ) {
-   ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
+   ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
    am->tag = ARMamN_R;
    am->ARMamN.R.rN = rN;
    return am;
@@ -435,7 +435,7 @@ static UInt ROR32 ( UInt x, UInt sh ) {
 }
 ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
-   ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
+   ARMRI84* ri84 = LibVEX_Alloc_inline(sizeof(ARMRI84));
    ri84->tag = ARMri84_I84;
    ri84->ARMri84.I84.imm8 = imm8;
    ri84->ARMri84.I84.imm4 = imm4;
@@ -444,7 +444,7 @@ ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
    return ri84;
 }
 ARMRI84* ARMRI84_R ( HReg reg ) {
-   ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
+   ARMRI84* ri84 = LibVEX_Alloc_inline(sizeof(ARMRI84));
    ri84->tag = ARMri84_R;
    ri84->ARMri84.R.reg = reg;
    return ri84;
@@ -492,14 +492,14 @@ static void mapRegs_ARMRI84 ( HRegRemap* m, ARMRI84* ri84 ) {
 /* --------- Reg or imm5 operands --------- */
 ARMRI5* ARMRI5_I5 ( UInt imm5 ) {
-   ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
+   ARMRI5* ri5 = LibVEX_Alloc_inline(sizeof(ARMRI5));
    ri5->tag = ARMri5_I5;
    ri5->ARMri5.I5.imm5 = imm5;
    vassert(imm5 > 0 && imm5 <= 31); // zero is not allowed
    return ri5;
 }
 ARMRI5* ARMRI5_R ( HReg reg ) {
-   ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
+   ARMRI5* ri5 = LibVEX_Alloc_inline(sizeof(ARMRI5));
    ri5->tag = ARMri5_R;
    ri5->ARMri5.R.reg = reg;
    return ri5;
@@ -545,7 +545,7 @@ static void mapRegs_ARMRI5 ( HRegRemap* m, ARMRI5* ri5 ) {
 /* -------- Neon Immediate operatnd --------- */
 ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
-   ARMNImm* i = LibVEX_Alloc(sizeof(ARMNImm));
+   ARMNImm* i = LibVEX_Alloc_inline(sizeof(ARMNImm));
    i->type = type;
    i->imm8 = imm8;
    return i;
@@ -659,7 +659,7 @@ void ppARMNImm (ARMNImm* i) {
 ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
 {
-   ARMNRS *p = LibVEX_Alloc(sizeof(ARMNRS));
+   ARMNRS *p = LibVEX_Alloc_inline(sizeof(ARMNRS));
    p->tag = tag;
    p->reg = reg;
    p->index = index;
@@ -1099,7 +1099,7 @@ static const HChar* showARMNeonDataSize ( const ARMInstr* i )
 ARMInstr* ARMInstr_Alu ( ARMAluOp op,
                          HReg dst, HReg argL, ARMRI84* argR ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Alu;
    i->ARMin.Alu.op = op;
    i->ARMin.Alu.dst = dst;
@@ -1109,7 +1109,7 @@ ARMInstr* ARMInstr_Alu ( ARMAluOp op,
 }
 ARMInstr* ARMInstr_Shift ( ARMShiftOp op,
                            HReg dst, HReg argL, ARMRI5* argR ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Shift;
    i->ARMin.Shift.op = op;
    i->ARMin.Shift.dst = dst;
@@ -1118,7 +1118,7 @@ ARMInstr* ARMInstr_Shift ( ARMShiftOp op,
    return i;
 }
 ARMInstr* ARMInstr_Unary ( ARMUnaryOp op, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Unary;
    i->ARMin.Unary.op = op;
    i->ARMin.Unary.dst = dst;
@@ -1126,7 +1126,7 @@ ARMInstr* ARMInstr_Unary ( ARMUnaryOp op, HReg dst, HReg src ) {
    return i;
 }
 ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg argL, ARMRI84* argR ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_CmpOrTst;
    i->ARMin.CmpOrTst.isCmp = isCmp;
    i->ARMin.CmpOrTst.argL = argL;
@@ -1134,14 +1134,14 @@ ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg argL, ARMRI84* argR ) {
    return i;
 }
 ARMInstr* ARMInstr_Mov ( HReg dst, ARMRI84* src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Mov;
    i->ARMin.Mov.dst = dst;
    i->ARMin.Mov.src = src;
    return i;
 }
 ARMInstr* ARMInstr_Imm32 ( HReg dst, UInt imm32 ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Imm32;
    i->ARMin.Imm32.dst = dst;
    i->ARMin.Imm32.imm32 = imm32;
@@ -1149,7 +1149,7 @@ ARMInstr* ARMInstr_Imm32 ( HReg dst, UInt imm32 ) {
 }
 ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
                             Bool isLoad, HReg rD, ARMAMode1* amode ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_LdSt32;
    i->ARMin.LdSt32.cc = cc;
    i->ARMin.LdSt32.isLoad = isLoad;
@@ -1161,7 +1161,7 @@ ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
 ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
                             Bool isLoad, Bool signedLoad,
                             HReg rD, ARMAMode2* amode ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_LdSt16;
    i->ARMin.LdSt16.cc = cc;
    i->ARMin.LdSt16.isLoad = isLoad;
@@ -1173,7 +1173,7 @@ ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
 }
 ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
                             Bool isLoad, HReg rD, ARMAMode1* amode ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_LdSt8U;
    i->ARMin.LdSt8U.cc = cc;
    i->ARMin.LdSt8U.isLoad = isLoad;
@@ -1183,7 +1183,7 @@ ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
    return i;
 }
 ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Ld8S;
    i->ARMin.Ld8S.cc = cc;
    i->ARMin.Ld8S.rD = rD;
@@ -1193,7 +1193,7 @@ ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
 }
 ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
                              ARMCondCode cond, Bool toFastEP ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_XDirect;
    i->ARMin.XDirect.dstGA = dstGA;
    i->ARMin.XDirect.amR15T = amR15T;
@@ -1203,7 +1203,7 @@ ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
 }
 ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
                             ARMCondCode cond ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_XIndir;
    i->ARMin.XIndir.dstGA = dstGA;
    i->ARMin.XIndir.amR15T = amR15T;
@@ -1212,7 +1212,7 @@ ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
 }
 ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
                                ARMCondCode cond, IRJumpKind jk ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_XAssisted;
    i->ARMin.XAssisted.dstGA = dstGA;
    i->ARMin.XAssisted.amR15T = amR15T;
@@ -1221,7 +1221,7 @@ ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
    return i;
 }
 ARMInstr* ARMInstr_CMov ( ARMCondCode cond, HReg dst, ARMRI84* src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_CMov;
    i->ARMin.CMov.cond = cond;
    i->ARMin.CMov.dst = dst;
@@ -1231,7 +1231,7 @@ ARMInstr* ARMInstr_CMov ( ARMCondCode cond, HReg dst, ARMRI84* src ) {
 }
 ARMInstr* ARMInstr_Call ( ARMCondCode cond, Addr32 target, Int nArgRegs,
                           RetLoc rloc ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Call;
    i->ARMin.Call.cond = cond;
    i->ARMin.Call.target = target;
@@ -1241,27 +1241,27 @@ ARMInstr* ARMInstr_Call ( ARMCondCode cond, Addr32 target, Int nArgRegs,
    return i;
 }
 ARMInstr* ARMInstr_Mul ( ARMMulOp op ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_Mul;
    i->ARMin.Mul.op = op;
    return i;
 }
 ARMInstr* ARMInstr_LdrEX ( Int szB ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_LdrEX;
    i->ARMin.LdrEX.szB = szB;
    vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
    return i;
 }
 ARMInstr* ARMInstr_StrEX ( Int szB ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_StrEX;
    i->ARMin.StrEX.szB = szB;
    vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
    return i;
 }
 ARMInstr* ARMInstr_VLdStD ( Bool isLoad, HReg dD, ARMAModeV* am ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VLdStD;
    i->ARMin.VLdStD.isLoad = isLoad;
    i->ARMin.VLdStD.dD = dD;
@@ -1269,7 +1269,7 @@ ARMInstr* ARMInstr_VLdStD ( Bool isLoad, HReg dD, ARMAModeV* am ) {
    return i;
 }
 ARMInstr* ARMInstr_VLdStS ( Bool isLoad, HReg fD, ARMAModeV* am ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VLdStS;
    i->ARMin.VLdStS.isLoad = isLoad;
    i->ARMin.VLdStS.fD = fD;
@@ -1277,7 +1277,7 @@ ARMInstr* ARMInstr_VLdStS ( Bool isLoad, HReg fD, ARMAModeV* am ) {
    return i;
 }
 ARMInstr* ARMInstr_VAluD ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VAluD;
    i->ARMin.VAluD.op = op;
    i->ARMin.VAluD.dst = dst;
@@ -1286,7 +1286,7 @@ ARMInstr* ARMInstr_VAluD ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
    return i;
 }
 ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VAluS;
    i->ARMin.VAluS.op = op;
    i->ARMin.VAluS.dst = dst;
@@ -1295,7 +1295,7 @@ ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
    return i;
 }
 ARMInstr* ARMInstr_VUnaryD ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VUnaryD;
    i->ARMin.VUnaryD.op = op;
    i->ARMin.VUnaryD.dst = dst;
@@ -1303,7 +1303,7 @@ ARMInstr* ARMInstr_VUnaryD ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
    return i;
 }
 ARMInstr* ARMInstr_VUnaryS ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VUnaryS;
    i->ARMin.VUnaryS.op = op;
    i->ARMin.VUnaryS.dst = dst;
@@ -1311,14 +1311,14 @@ ARMInstr* ARMInstr_VUnaryS ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
    return i;
 }
 ARMInstr* ARMInstr_VCmpD ( HReg argL, HReg argR ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VCmpD;
    i->ARMin.VCmpD.argL = argL;
    i->ARMin.VCmpD.argR = argR;
    return i;
 }
 ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VCMovD;
    i->ARMin.VCMovD.cond = cond;
    i->ARMin.VCMovD.dst = dst;
@@ -1327,7 +1327,7 @@ ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
    return i;
 }
 ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VCMovS;
    i->ARMin.VCMovS.cond = cond;
    i->ARMin.VCMovS.dst = dst;
@@ -1336,7 +1336,7 @@ ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
    return i;
 }
 ARMInstr* ARMInstr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VCvtSD;
    i->ARMin.VCvtSD.sToD = sToD;
    i->ARMin.VCvtSD.dst = dst;
@@ -1344,7 +1344,7 @@ ARMInstr* ARMInstr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
    return i;
 }
 ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VXferD;
    i->ARMin.VXferD.toD = toD;
    i->ARMin.VXferD.dD = dD;
@@ -1353,7 +1353,7 @@ ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
    return i;
 }
 ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VXferS;
    i->ARMin.VXferS.toS = toS;
    i->ARMin.VXferS.fD = fD;
@@ -1362,7 +1362,7 @@ ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
 }
 ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
                             HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_VCvtID;
    i->ARMin.VCvtID.iToD = iToD;
    i->ARMin.VCvtID.syned = syned;
@@ -1371,25 +1371,25 @@ ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
    return i;
 }
 ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_FPSCR;
    i->ARMin.FPSCR.toFPSCR = toFPSCR;
    i->ARMin.FPSCR.iReg = iReg;
    return i;
 }
 ARMInstr* ARMInstr_MFence ( void ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_MFence;
    return i;
 }
 ARMInstr* ARMInstr_CLREX( void ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_CLREX;
    return i;
 }
 ARMInstr* ARMInstr_NLdStQ ( Bool isLoad, HReg dQ, ARMAModeN *amode ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NLdStQ;
    i->ARMin.NLdStQ.isLoad = isLoad;
    i->ARMin.NLdStQ.dQ = dQ;
@@ -1398,7 +1398,7 @@ ARMInstr* ARMInstr_NLdStQ ( Bool isLoad, HReg dQ, ARMAModeN *amode ) {
 }
 ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NLdStD;
    i->ARMin.NLdStD.isLoad = isLoad;
    i->ARMin.NLdStD.dD = dD;
@@ -1408,7 +1408,7 @@ ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
 ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
                             UInt size, Bool Q ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NUnary;
    i->ARMin.NUnary.op = op;
    i->ARMin.NUnary.src = nQ;
@@ -1420,7 +1420,7 @@ ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
 ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
                              UInt size, Bool Q ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NUnaryS;
    i->ARMin.NUnaryS.op = op;
    i->ARMin.NUnaryS.src = src;
@@ -1432,7 +1432,7 @@ ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
 ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
                            UInt size, Bool Q ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NDual;
    i->ARMin.NDual.op = op;
    i->ARMin.NDual.arg1 = nQ;
@@ -1445,7 +1445,7 @@ ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
 ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
                              HReg dst, HReg argL, HReg argR,
                              UInt size, Bool Q ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NBinary;
    i->ARMin.NBinary.op = op;
    i->ARMin.NBinary.argL = argL;
@@ -1457,7 +1457,7 @@ ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
 }
 ARMInstr* ARMInstr_NeonImm (HReg dst, ARMNImm* imm ) {
-   ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr *i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NeonImm;
    i->ARMin.NeonImm.dst = dst;
    i->ARMin.NeonImm.imm = imm;
@@ -1465,7 +1465,7 @@ ARMInstr* ARMInstr_NeonImm (HReg dst, ARMNImm* imm ) {
 }
 ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NCMovQ;
    i->ARMin.NCMovQ.cond = cond;
    i->ARMin.NCMovQ.dst = dst;
@@ -1477,7 +1477,7 @@ ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
 ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
                             HReg dst, HReg argL, HReg argR,
                             UInt size, Bool Q ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NShift;
    i->ARMin.NShift.op = op;
    i->ARMin.NShift.argL = argL;
@@ -1490,7 +1490,7 @@ ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
 ARMInstr* ARMInstr_NShl64 ( HReg dst, HReg src, UInt amt )
 {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_NShl64;
    i->ARMin.NShl64.dst = dst;
    i->ARMin.NShl64.src = src;
@@ -1517,7 +1517,7 @@ static Bool fitsIn8x4 ( UInt* u8, UInt* u4, UInt u )
 ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
    UInt u8, u4;
-   ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr *i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    /* Try to generate single ADD if possible */
    if (fitsIn8x4(&u8, &u4, imm32)) {
       i->tag = ARMin_Alu;
@@ -1536,7 +1536,7 @@ ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
 ARMInstr* ARMInstr_EvCheck ( ARMAMode1* amCounter,
                              ARMAMode1* amFailAddr ) {
-   ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
    i->tag = ARMin_EvCheck;
    i->ARMin.EvCheck.amCounter = amCounter;
    i->ARMin.EvCheck.amFailAddr = amFailAddr;
@@ -1544,7 +1544,7 @@ ARMInstr* ARMInstr_EvCheck ( ARMAMode1* amCounter,
 }
 ARMInstr* ARMInstr_ProfInc (
void ) { - ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr)); + ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr)); i->tag = ARMin_ProfInc; return i; } diff --git a/VEX/priv/host_arm_isel.c b/VEX/priv/host_arm_isel.c index 023fb74022..c3443d8149 100644 --- a/VEX/priv/host_arm_isel.c +++ b/VEX/priv/host_arm_isel.c @@ -6365,7 +6365,7 @@ HInstrArray* iselSB_ARM ( const IRSB* bb, arm_hwcaps = hwcaps_host; // JRS 2012 Mar 31: FIXME (RM) /* Make up an initial environment to use. */ - env = LibVEX_Alloc(sizeof(ISelEnv)); + env = LibVEX_Alloc_inline(sizeof(ISelEnv)); env->vreg_ctr = 0; /* Set up output code array. */ @@ -6377,8 +6377,8 @@ HInstrArray* iselSB_ARM ( const IRSB* bb, /* Make up an IRTemp -> virtual HReg mapping. This doesn't change as we go along. */ env->n_vregmap = bb->tyenv->types_used; - env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); - env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); + env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); + env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); /* and finally ... */ env->chainingAllowed = chainingAllowed; diff --git a/VEX/priv/host_generic_reg_alloc2.c b/VEX/priv/host_generic_reg_alloc2.c index 68ccd6adc1..5ea9c00586 100644 --- a/VEX/priv/host_generic_reg_alloc2.c +++ b/VEX/priv/host_generic_reg_alloc2.c @@ -225,7 +225,7 @@ static void ensureRRLRspace ( RRegLR** info, Int* size, Int used ) if (0) vex_printf("ensureRRISpace: %d -> %d\n", *size, 2 * *size); vassert(used == *size); - arr2 = LibVEX_Alloc(2 * *size * sizeof(RRegLR)); + arr2 = LibVEX_Alloc_inline(2 * *size * sizeof(RRegLR)); for (k = 0; k < *size; k++) arr2[k] = (*info)[k]; *size *= 2; @@ -463,8 +463,8 @@ HInstrArray* doRegisterAllocation ( /* If this is not so, vreg_state entries will overflow. */ vassert(n_vregs < 32767); - rreg_state = LibVEX_Alloc(n_rregs * sizeof(RRegState)); - vreg_state = LibVEX_Alloc(n_vregs * sizeof(Short)); + rreg_state = LibVEX_Alloc_inline(n_rregs * sizeof(RRegState)); + vreg_state = LibVEX_Alloc_inline(n_vregs * sizeof(Short)); for (j = 0; j < n_rregs; j++) { rreg_state[j].rreg = available_real_regs[j]; @@ -492,7 +492,7 @@ HInstrArray* doRegisterAllocation ( vreg_lrs = NULL; if (n_vregs > 0) - vreg_lrs = LibVEX_Alloc(sizeof(VRegLR) * n_vregs); + vreg_lrs = LibVEX_Alloc_inline(sizeof(VRegLR) * n_vregs); for (j = 0; j < n_vregs; j++) { vreg_lrs[j].live_after = INVALID_INSTRNO; @@ -512,14 +512,14 @@ HInstrArray* doRegisterAllocation ( rreg_lrs_used = 0; rreg_lrs_size = 4; - rreg_lrs_la = LibVEX_Alloc(rreg_lrs_size * sizeof(RRegLR)); + rreg_lrs_la = LibVEX_Alloc_inline(rreg_lrs_size * sizeof(RRegLR)); rreg_lrs_db = NULL; /* we'll create this later */ /* We'll need to track live range start/end points separately for each rreg. Sigh. */ vassert(n_available_real_regs > 0); - rreg_live_after = LibVEX_Alloc(n_available_real_regs * sizeof(Int)); - rreg_dead_before = LibVEX_Alloc(n_available_real_regs * sizeof(Int)); + rreg_live_after = LibVEX_Alloc_inline(n_available_real_regs * sizeof(Int)); + rreg_dead_before = LibVEX_Alloc_inline(n_available_real_regs * sizeof(Int)); for (j = 0; j < n_available_real_regs; j++) { rreg_live_after[j] = @@ -745,7 +745,7 @@ HInstrArray* doRegisterAllocation ( /* Finally, copy the _la variant into the _db variant and sort both by their respective fields.
*/ - rreg_lrs_db = LibVEX_Alloc(rreg_lrs_used * sizeof(RRegLR)); + rreg_lrs_db = LibVEX_Alloc_inline(rreg_lrs_used * sizeof(RRegLR)); for (j = 0; j < rreg_lrs_used; j++) rreg_lrs_db[j] = rreg_lrs_la[j]; diff --git a/VEX/priv/host_generic_regs.c b/VEX/priv/host_generic_regs.c index 267b7cea91..75fd4149b4 100644 --- a/VEX/priv/host_generic_regs.c +++ b/VEX/priv/host_generic_regs.c @@ -192,10 +192,10 @@ HReg lookupHRegRemap ( HRegRemap* map, HReg orig ) HInstrArray* newHInstrArray ( void ) { - HInstrArray* ha = LibVEX_Alloc(sizeof(HInstrArray)); + HInstrArray* ha = LibVEX_Alloc_inline(sizeof(HInstrArray)); ha->arr_size = 4; ha->arr_used = 0; - ha->arr = LibVEX_Alloc(ha->arr_size * sizeof(HInstr*)); + ha->arr = LibVEX_Alloc_inline(ha->arr_size * sizeof(HInstr*)); ha->n_vregs = 0; return ha; } @@ -208,7 +208,7 @@ void addHInstr ( HInstrArray* ha, HInstr* instr ) ha->arr_used++; } else { Int i; - HInstr** arr2 = LibVEX_Alloc(ha->arr_size * 2 * sizeof(HInstr*)); + HInstr** arr2 = LibVEX_Alloc_inline(ha->arr_size * 2 * sizeof(HInstr*)); for (i = 0; i < ha->arr_size; i++) arr2[i] = ha->arr[i]; ha->arr_size *= 2; diff --git a/VEX/priv/host_mips_defs.c b/VEX/priv/host_mips_defs.c index 398778f2ed..e960c5a2c2 100644 --- a/VEX/priv/host_mips_defs.c +++ b/VEX/priv/host_mips_defs.c @@ -568,7 +568,7 @@ void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64) else *nregs = 28; UInt i = 0; - *arr = LibVEX_Alloc(*nregs * sizeof(HReg)); + *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg)); /* ZERO = constant 0 AT = assembler temporary @@ -871,7 +871,7 @@ const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op ) MIPSAMode *MIPSAMode_IR(Int idx, HReg base) { - MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode)); + MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode)); am->tag = Mam_IR; am->Mam.IR.base = base; am->Mam.IR.index = idx; @@ -881,7 +881,7 @@ MIPSAMode *MIPSAMode_IR(Int idx, HReg base) MIPSAMode *MIPSAMode_RR(HReg idx, HReg base) { - MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode)); + MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode)); am->tag = Mam_RR; am->Mam.RR.base = base; am->Mam.RR.index = idx; @@ -1004,7 +1004,7 @@ static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am) MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16) { - MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH)); + MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH)); op->tag = Mrh_Imm; op->Mrh.Imm.syned = syned; op->Mrh.Imm.imm16 = imm16; @@ -1018,7 +1018,7 @@ MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16) MIPSRH *MIPSRH_Reg(HReg reg) { - MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH)); + MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH)); op->tag = Mrh_Reg; op->Mrh.Reg.reg = reg; return op; @@ -1180,7 +1180,7 @@ const HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable) MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_LI; i->Min.LI.dst = dst; i->Min.LI.imm = imm; @@ -1189,7 +1189,7 @@ MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm) MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Alu; i->Min.Alu.op = op; i->Min.Alu.dst = dst; @@ -1201,7 +1201,7 @@ MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR) MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL, MIPSRH * srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = 
LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Shft; i->Min.Shft.op = op; i->Min.Shft.sz32 = sz32; @@ -1213,7 +1213,7 @@ MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL, MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Unary; i->Min.Unary.op = op; i->Min.Unary.dst = dst; @@ -1224,7 +1224,7 @@ MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src) MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR, MIPSCondCode cond) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Cmp; i->Min.Cmp.syned = syned; i->Min.Cmp.sz32 = sz32; @@ -1239,7 +1239,7 @@ MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR, MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL, HReg srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Mul; i->Min.Mul.syned = syned; i->Min.Mul.widening = wid; /* widen=True else False */ @@ -1253,7 +1253,7 @@ MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL, /* msub */ MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Macc; i->Min.Macc.op = Macc_SUB; @@ -1266,7 +1266,7 @@ MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR) /* madd */ MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Macc; i->Min.Macc.op = Macc_ADD; @@ -1279,7 +1279,7 @@ MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR) /* div */ MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Div; i->Min.Div.syned = syned; i->Min.Div.sz32 = sz32; /* True = 32 bits */ @@ -1292,7 +1292,7 @@ MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs, HReg src, RetLoc rloc ) { UInt mask; - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Call; i->Min.Call.cond = cond; i->Min.Call.target = target; @@ -1311,7 +1311,7 @@ MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target, UInt argiregs, RetLoc rloc ) { UInt mask; - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Call; i->Min.Call.cond = cond; i->Min.Call.target = target; @@ -1327,7 +1327,7 @@ MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target, MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC, MIPSCondCode cond, Bool toFastEP ) { - MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_XDirect; i->Min.XDirect.dstGA = dstGA; i->Min.XDirect.amPC = amPC; @@ -1338,7 +1338,7 @@ MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC, MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC, MIPSCondCode cond ) { - MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_XIndir; i->Min.XIndir.dstGA = dstGA; i->Min.XIndir.amPC = amPC; @@ 
-1348,7 +1348,7 @@ MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC, MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC, MIPSCondCode cond, IRJumpKind jk ) { - MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_XAssisted; i->Min.XAssisted.dstGA = dstGA; i->Min.XAssisted.amPC = amPC; @@ -1359,7 +1359,7 @@ MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC, MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Load; i->Min.Load.sz = sz; i->Min.Load.src = src; @@ -1373,7 +1373,7 @@ MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64) MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Store; i->Min.Store.sz = sz; i->Min.Store.src = src; @@ -1387,7 +1387,7 @@ MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64) MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_LoadL; i->Min.LoadL.sz = sz; i->Min.LoadL.src = src; @@ -1402,7 +1402,7 @@ MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64) MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr, HReg expd, HReg data, Bool mode64) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Cas; i->Min.Cas.sz = sz; i->Min.Cas.old = old; @@ -1418,7 +1418,7 @@ MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr, MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_StoreC; i->Min.StoreC.sz = sz; i->Min.StoreC.src = src; @@ -1432,7 +1432,7 @@ MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64) MIPSInstr *MIPSInstr_Mthi(HReg src) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Mthi; i->Min.MtHL.src = src; return i; @@ -1440,7 +1440,7 @@ MIPSInstr *MIPSInstr_Mthi(HReg src) MIPSInstr *MIPSInstr_Mtlo(HReg src) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Mtlo; i->Min.MtHL.src = src; return i; @@ -1448,7 +1448,7 @@ MIPSInstr *MIPSInstr_Mtlo(HReg src) MIPSInstr *MIPSInstr_Mfhi(HReg dst) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Mfhi; i->Min.MfHL.dst = dst; return i; @@ -1456,7 +1456,7 @@ MIPSInstr *MIPSInstr_Mfhi(HReg dst) MIPSInstr *MIPSInstr_Mflo(HReg dst) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_Mflo; i->Min.MfHL.dst = dst; return i; @@ -1465,7 +1465,7 @@ MIPSInstr *MIPSInstr_Mflo(HReg dst) /* Read/Write Link Register */ MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_RdWrLR; i->Min.RdWrLR.wrLR = wrLR; i->Min.RdWrLR.gpr = gpr; @@ -1474,7 +1474,7 @@ MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr) 
MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpLdSt; i->Min.FpLdSt.isLoad = isLoad; i->Min.FpLdSt.sz = sz; @@ -1486,7 +1486,7 @@ MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr) MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpUnary; i->Min.FpUnary.op = op; i->Min.FpUnary.dst = dst; @@ -1496,7 +1496,7 @@ MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src) MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpBinary; i->Min.FpBinary.op = op; i->Min.FpBinary.dst = dst; @@ -1508,7 +1508,7 @@ MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR) MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2, HReg src3 ) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpTernary; i->Min.FpTernary.op = op; i->Min.FpTernary.dst = dst; @@ -1520,7 +1520,7 @@ MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2, MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpConvert; i->Min.FpConvert.op = op; i->Min.FpConvert.dst = dst; @@ -1531,7 +1531,7 @@ MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src) MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpCompare; i->Min.FpCompare.op = op; i->Min.FpCompare.dst = dst; @@ -1542,7 +1542,7 @@ MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR) MIPSInstr *MIPSInstr_MtFCSR(HReg src) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_MtFCSR; i->Min.MtFCSR.src = src; return i; @@ -1550,7 +1550,7 @@ MIPSInstr *MIPSInstr_MtFCSR(HReg src) MIPSInstr *MIPSInstr_MfFCSR(HReg dst) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_MfFCSR; i->Min.MfFCSR.dst = dst; return i; @@ -1558,7 +1558,7 @@ MIPSInstr *MIPSInstr_MfFCSR(HReg dst) MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src ) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_FpGpMove; i->Min.FpGpMove.op = op; i->Min.FpGpMove.dst = dst; @@ -1569,7 +1569,7 @@ MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src ) MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src, HReg cond ) { - MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_MoveCond; i->Min.MoveCond.op = op; i->Min.MoveCond.dst = dst; @@ -1580,7 +1580,7 @@ MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src, MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter, MIPSAMode* amFailAddr ) { - MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = 
Min_EvCheck; i->Min.EvCheck.amCounter = amCounter; i->Min.EvCheck.amFailAddr = amFailAddr; @@ -1588,7 +1588,7 @@ MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter, } MIPSInstr* MIPSInstr_ProfInc ( void ) { - MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr)); + MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr)); i->tag = Min_ProfInc; return i; } diff --git a/VEX/priv/host_mips_isel.c b/VEX/priv/host_mips_isel.c index 077bfb0d4f..09ac321ece 100644 --- a/VEX/priv/host_mips_isel.c +++ b/VEX/priv/host_mips_isel.c @@ -4185,7 +4185,7 @@ HInstrArray *iselSB_MIPS ( const IRSB* bb, #endif /* Make up an initial environment to use. */ - env = LibVEX_Alloc(sizeof(ISelEnv)); + env = LibVEX_Alloc_inline(sizeof(ISelEnv)); env->vreg_ctr = 0; env->mode64 = mode64; env->fp_mode64 = fp_mode64; @@ -4199,8 +4199,8 @@ HInstrArray *iselSB_MIPS ( const IRSB* bb, /* Make up an IRTemp -> virtual HReg mapping. This doesn't change as we go along. */ env->n_vregmap = bb->tyenv->types_used; - env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); - env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); + env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); + env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); /* and finally ... */ env->hwcaps = hwcaps_host; diff --git a/VEX/priv/host_ppc_defs.c b/VEX/priv/host_ppc_defs.c index 782cbbfbb7..57e3b38f89 100644 --- a/VEX/priv/host_ppc_defs.c +++ b/VEX/priv/host_ppc_defs.c @@ -200,7 +200,7 @@ void getAllocableRegs_PPC ( Int* nregs, HReg** arr, Bool mode64 ) *nregs = (32-9) + (32-24) + (32-24); else *nregs = (32-7) + (32-24) + (32-24); - *arr = LibVEX_Alloc(*nregs * sizeof(HReg)); + *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg)); // GPR0 = scratch reg where poss. - some ops interpret as value zero // GPR1 = stack pointer // GPR2 = TOC pointer @@ -317,7 +317,7 @@ PPCCondTest invertCondTest ( PPCCondTest ct ) /* --------- PPCAMode: memory address expressions. --------- */ PPCAMode* PPCAMode_IR ( Int idx, HReg base ) { - PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode)); + PPCAMode* am = LibVEX_Alloc_inline(sizeof(PPCAMode)); vassert(idx >= -0x8000 && idx < 0x8000); am->tag = Pam_IR; am->Pam.IR.base = base; @@ -325,7 +325,7 @@ PPCAMode* PPCAMode_IR ( Int idx, HReg base ) { return am; } PPCAMode* PPCAMode_RR ( HReg idx, HReg base ) { - PPCAMode* am = LibVEX_Alloc(sizeof(PPCAMode)); + PPCAMode* am = LibVEX_Alloc_inline(sizeof(PPCAMode)); am->tag = Pam_RR; am->Pam.RR.base = base; am->Pam.RR.index = idx; @@ -394,7 +394,7 @@ static void mapRegs_PPCAMode ( HRegRemap* m, PPCAMode* am ) { /* --------- Operand, which can be a reg or a u16/s16. --------- */ PPCRH* PPCRH_Imm ( Bool syned, UShort imm16 ) { - PPCRH* op = LibVEX_Alloc(sizeof(PPCRH)); + PPCRH* op = LibVEX_Alloc_inline(sizeof(PPCRH)); op->tag = Prh_Imm; op->Prh.Imm.syned = syned; op->Prh.Imm.imm16 = imm16; @@ -406,7 +406,7 @@ PPCRH* PPCRH_Imm ( Bool syned, UShort imm16 ) { return op; } PPCRH* PPCRH_Reg ( HReg reg ) { - PPCRH* op = LibVEX_Alloc(sizeof(PPCRH)); + PPCRH* op = LibVEX_Alloc_inline(sizeof(PPCRH)); op->tag = Prh_Reg; op->Prh.Reg.reg = reg; return op; @@ -459,13 +459,13 @@ static void mapRegs_PPCRH ( HRegRemap* m, PPCRH* op ) { /* --------- Operand, which can be a reg or a u32/64. 
--------- */ PPCRI* PPCRI_Imm ( ULong imm64 ) { - PPCRI* op = LibVEX_Alloc(sizeof(PPCRI)); + PPCRI* op = LibVEX_Alloc_inline(sizeof(PPCRI)); op->tag = Pri_Imm; op->Pri.Imm = imm64; return op; } PPCRI* PPCRI_Reg ( HReg reg ) { - PPCRI* op = LibVEX_Alloc(sizeof(PPCRI)); + PPCRI* op = LibVEX_Alloc_inline(sizeof(PPCRI)); op->tag = Pri_Reg; op->Pri.Reg = reg; return op; @@ -515,14 +515,14 @@ static void mapRegs_PPCRI ( HRegRemap* m, PPCRI* dst ) { /* --------- Operand, which can be a vector reg or a simm5. --------- */ PPCVI5s* PPCVI5s_Imm ( Char simm5 ) { - PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s)); + PPCVI5s* op = LibVEX_Alloc_inline(sizeof(PPCVI5s)); op->tag = Pvi_Imm; op->Pvi.Imm5s = simm5; vassert(simm5 >= -16 && simm5 <= 15); return op; } PPCVI5s* PPCVI5s_Reg ( HReg reg ) { - PPCVI5s* op = LibVEX_Alloc(sizeof(PPCVI5s)); + PPCVI5s* op = LibVEX_Alloc_inline(sizeof(PPCVI5s)); op->tag = Pvi_Reg; op->Pvi.Reg = reg; vassert(hregClass(reg) == HRcVec128); @@ -782,7 +782,7 @@ const HChar* showPPCAvFpOp ( PPCAvFpOp op ) { PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_LI; i->Pin.LI.dst = dst; i->Pin.LI.imm64 = imm64; @@ -792,7 +792,7 @@ PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 ) } PPCInstr* PPCInstr_Alu ( PPCAluOp op, HReg dst, HReg srcL, PPCRH* srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Alu; i->Pin.Alu.op = op; i->Pin.Alu.dst = dst; @@ -802,7 +802,7 @@ PPCInstr* PPCInstr_Alu ( PPCAluOp op, HReg dst, } PPCInstr* PPCInstr_Shft ( PPCShftOp op, Bool sz32, HReg dst, HReg srcL, PPCRH* srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Shft; i->Pin.Shft.op = op; i->Pin.Shft.sz32 = sz32; @@ -813,7 +813,7 @@ PPCInstr* PPCInstr_Shft ( PPCShftOp op, Bool sz32, } PPCInstr* PPCInstr_AddSubC ( Bool isAdd, Bool setC, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AddSubC; i->Pin.AddSubC.isAdd = isAdd; i->Pin.AddSubC.setC = setC; @@ -824,7 +824,7 @@ PPCInstr* PPCInstr_AddSubC ( Bool isAdd, Bool setC, } PPCInstr* PPCInstr_Cmp ( Bool syned, Bool sz32, UInt crfD, HReg srcL, PPCRH* srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Cmp; i->Pin.Cmp.syned = syned; i->Pin.Cmp.sz32 = sz32; @@ -834,7 +834,7 @@ PPCInstr* PPCInstr_Cmp ( Bool syned, Bool sz32, return i; } PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Unary; i->Pin.Unary.op = op; i->Pin.Unary.dst = dst; @@ -843,7 +843,7 @@ PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ) { } PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi, Bool sz32, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_MulL; i->Pin.MulL.syned = syned; i->Pin.MulL.hi = hi; @@ -858,7 +858,7 @@ PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi, Bool sz32, } PPCInstr* PPCInstr_Div ( Bool extended, Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Div; 
i->Pin.Div.extended = extended; i->Pin.Div.syned = syned; @@ -871,7 +871,7 @@ PPCInstr* PPCInstr_Div ( Bool extended, Bool syned, Bool sz32, PPCInstr* PPCInstr_Call ( PPCCondCode cond, Addr64 target, UInt argiregs, RetLoc rloc ) { UInt mask; - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Call; i->Pin.Call.cond = cond; i->Pin.Call.target = target; @@ -885,7 +885,7 @@ PPCInstr* PPCInstr_Call ( PPCCondCode cond, } PPCInstr* PPCInstr_XDirect ( Addr64 dstGA, PPCAMode* amCIA, PPCCondCode cond, Bool toFastEP ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_XDirect; i->Pin.XDirect.dstGA = dstGA; i->Pin.XDirect.amCIA = amCIA; @@ -895,7 +895,7 @@ PPCInstr* PPCInstr_XDirect ( Addr64 dstGA, PPCAMode* amCIA, } PPCInstr* PPCInstr_XIndir ( HReg dstGA, PPCAMode* amCIA, PPCCondCode cond ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_XIndir; i->Pin.XIndir.dstGA = dstGA; i->Pin.XIndir.amCIA = amCIA; @@ -904,7 +904,7 @@ PPCInstr* PPCInstr_XIndir ( HReg dstGA, PPCAMode* amCIA, } PPCInstr* PPCInstr_XAssisted ( HReg dstGA, PPCAMode* amCIA, PPCCondCode cond, IRJumpKind jk ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_XAssisted; i->Pin.XAssisted.dstGA = dstGA; i->Pin.XAssisted.amCIA = amCIA; @@ -914,7 +914,7 @@ PPCInstr* PPCInstr_XAssisted ( HReg dstGA, PPCAMode* amCIA, } PPCInstr* PPCInstr_CMov ( PPCCondCode cond, HReg dst, PPCRI* src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_CMov; i->Pin.CMov.cond = cond; i->Pin.CMov.src = src; @@ -924,7 +924,7 @@ PPCInstr* PPCInstr_CMov ( PPCCondCode cond, } PPCInstr* PPCInstr_Load ( UChar sz, HReg dst, PPCAMode* src, Bool mode64 ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Load; i->Pin.Load.sz = sz; i->Pin.Load.src = src; @@ -936,7 +936,7 @@ PPCInstr* PPCInstr_Load ( UChar sz, PPCInstr* PPCInstr_LoadL ( UChar sz, HReg dst, HReg src, Bool mode64 ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_LoadL; i->Pin.LoadL.sz = sz; i->Pin.LoadL.src = src; @@ -947,7 +947,7 @@ PPCInstr* PPCInstr_LoadL ( UChar sz, } PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src, Bool mode64 ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Store; i->Pin.Store.sz = sz; i->Pin.Store.src = src; @@ -957,7 +957,7 @@ PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src, return i; } PPCInstr* PPCInstr_StoreC ( UChar sz, HReg dst, HReg src, Bool mode64 ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_StoreC; i->Pin.StoreC.sz = sz; i->Pin.StoreC.src = src; @@ -967,7 +967,7 @@ PPCInstr* PPCInstr_StoreC ( UChar sz, HReg dst, HReg src, Bool mode64 ) { return i; } PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Set; i->Pin.Set.cond = cond; i->Pin.Set.dst = dst; @@ -975,20 +975,20 @@ PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) { } PPCInstr* PPCInstr_MfCR ( HReg dst ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* 
i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_MfCR; i->Pin.MfCR.dst = dst; return i; } PPCInstr* PPCInstr_MFence ( void ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_MFence; return i; } PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpUnary; i->Pin.FpUnary.op = op; i->Pin.FpUnary.dst = dst; @@ -997,7 +997,7 @@ PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ) { } PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpBinary; i->Pin.FpBinary.op = op; i->Pin.FpBinary.dst = dst; @@ -1008,7 +1008,7 @@ PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst, PPCInstr* PPCInstr_FpMulAcc ( PPCFpOp op, HReg dst, HReg srcML, HReg srcMR, HReg srcAcc ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpMulAcc; i->Pin.FpMulAcc.op = op; i->Pin.FpMulAcc.dst = dst; @@ -1019,7 +1019,7 @@ PPCInstr* PPCInstr_FpMulAcc ( PPCFpOp op, HReg dst, HReg srcML, } PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, PPCAMode* addr ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpLdSt; i->Pin.FpLdSt.isLoad = isLoad; i->Pin.FpLdSt.sz = sz; @@ -1030,21 +1030,21 @@ PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz, } PPCInstr* PPCInstr_FpSTFIW ( HReg addr, HReg data ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpSTFIW; i->Pin.FpSTFIW.addr = addr; i->Pin.FpSTFIW.data = data; return i; } PPCInstr* PPCInstr_FpRSP ( HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpRSP; i->Pin.FpRSP.dst = dst; i->Pin.FpRSP.src = src; return i; } PPCInstr* PPCInstr_Dfp64Unary(PPCFpOp op, HReg dst, HReg src) { - PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) ); + PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) ); i->tag = Pin_Dfp64Unary; i->Pin.Dfp64Unary.op = op; i->Pin.Dfp64Unary.dst = dst; @@ -1052,7 +1052,7 @@ PPCInstr* PPCInstr_Dfp64Unary(PPCFpOp op, HReg dst, HReg src) { return i; } PPCInstr* PPCInstr_Dfp64Binary(PPCFpOp op, HReg dst, HReg srcL, HReg srcR) { - PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) ); + PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) ); i->tag = Pin_Dfp64Binary; i->Pin.Dfp64Binary.op = op; i->Pin.Dfp64Binary.dst = dst; @@ -1061,7 +1061,7 @@ PPCInstr* PPCInstr_Dfp64Binary(PPCFpOp op, HReg dst, HReg srcL, HReg srcR) { return i; } PPCInstr* PPCInstr_DfpShift ( PPCFpOp op, HReg dst, HReg src, PPCRI* shift ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpShift; i->Pin.DfpShift.op = op; i->Pin.DfpShift.shift = shift; @@ -1071,7 +1071,7 @@ PPCInstr* PPCInstr_DfpShift ( PPCFpOp op, HReg dst, HReg src, PPCRI* shift ) { } PPCInstr* PPCInstr_Dfp128Unary(PPCFpOp op, HReg dst_hi, HReg dst_lo, HReg src_hi, HReg src_lo) { - PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) ); + PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) ); i->tag = Pin_Dfp128Unary; i->Pin.Dfp128Unary.op = op; i->Pin.Dfp128Unary.dst_hi = dst_hi; @@ -1083,7 +1083,7 @@ PPCInstr* PPCInstr_Dfp128Unary(PPCFpOp op, HReg 
dst_hi, HReg dst_lo, PPCInstr* PPCInstr_Dfp128Binary(PPCFpOp op, HReg dst_hi, HReg dst_lo, HReg srcR_hi, HReg srcR_lo) { /* dst is used to pass the srcL argument and return the result */ - PPCInstr* i = LibVEX_Alloc( sizeof(PPCInstr) ); + PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) ); i->tag = Pin_Dfp128Binary; i->Pin.Dfp128Binary.op = op; i->Pin.Dfp128Binary.dst_hi = dst_hi; @@ -1095,7 +1095,7 @@ PPCInstr* PPCInstr_Dfp128Binary(PPCFpOp op, HReg dst_hi, HReg dst_lo, PPCInstr* PPCInstr_DfpShift128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, HReg src_hi, HReg src_lo, PPCRI* shift ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpShift128; i->Pin.DfpShift128.op = op; i->Pin.DfpShift128.shift = shift; @@ -1106,7 +1106,7 @@ PPCInstr* PPCInstr_DfpShift128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, return i; } PPCInstr* PPCInstr_DfpRound ( HReg dst, HReg src, PPCRI* r_rmc ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpRound; i->Pin.DfpRound.dst = dst; i->Pin.DfpRound.src = src; @@ -1115,7 +1115,7 @@ PPCInstr* PPCInstr_DfpRound ( HReg dst, HReg src, PPCRI* r_rmc ) { } PPCInstr* PPCInstr_DfpRound128 ( HReg dst_hi, HReg dst_lo, HReg src_hi, HReg src_lo, PPCRI* r_rmc ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpRound128; i->Pin.DfpRound128.dst_hi = dst_hi; i->Pin.DfpRound128.dst_lo = dst_lo; @@ -1126,7 +1126,7 @@ PPCInstr* PPCInstr_DfpRound128 ( HReg dst_hi, HReg dst_lo, HReg src_hi, } PPCInstr* PPCInstr_DfpQuantize ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR, PPCRI* rmc ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpQuantize; i->Pin.DfpQuantize.op = op; i->Pin.DfpQuantize.dst = dst; @@ -1138,7 +1138,7 @@ PPCInstr* PPCInstr_DfpQuantize ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR, PPCInstr* PPCInstr_DfpQuantize128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, HReg src_hi, HReg src_lo, PPCRI* rmc ) { /* dst is used to pass left operand in and return result */ - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpQuantize128; i->Pin.DfpQuantize128.op = op; i->Pin.DfpQuantize128.dst_hi = dst_hi; @@ -1150,7 +1150,7 @@ PPCInstr* PPCInstr_DfpQuantize128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, } PPCInstr* PPCInstr_DfpD128toD64 ( PPCFpOp op, HReg dst, HReg src_hi, HReg src_lo ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpD128toD64; i->Pin.DfpD128toD64.op = op; i->Pin.DfpD128toD64.src_hi = src_hi; @@ -1160,7 +1160,7 @@ PPCInstr* PPCInstr_DfpD128toD64 ( PPCFpOp op, HReg dst, } PPCInstr* PPCInstr_DfpI64StoD128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_DfpI64StoD128; i->Pin.DfpI64StoD128.op = op; i->Pin.DfpI64StoD128.src = src; @@ -1171,7 +1171,7 @@ PPCInstr* PPCInstr_DfpI64StoD128 ( PPCFpOp op, HReg dst_hi, PPCInstr* PPCInstr_ExtractExpD128 ( PPCFpOp op, HReg dst, HReg src_hi, HReg src_lo ) { /* dst is used to pass the srcL argument */ - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_ExtractExpD128; i->Pin.ExtractExpD128.op = op; i->Pin.ExtractExpD128.dst = dst; @@ -1182,7 +1182,7 @@ 
PPCInstr* PPCInstr_ExtractExpD128 ( PPCFpOp op, HReg dst, PPCInstr* PPCInstr_InsertExpD128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, HReg srcL, HReg srcR_hi, HReg srcR_lo ) { /* dst is used to pass the srcL argument */ - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_InsertExpD128; i->Pin.InsertExpD128.op = op; i->Pin.InsertExpD128.dst_hi = dst_hi; @@ -1193,7 +1193,7 @@ PPCInstr* PPCInstr_InsertExpD128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, return i; } PPCInstr* PPCInstr_Dfp64Cmp (/* UInt crfD,*/ HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Dfp64Cmp; i->Pin.Dfp64Cmp.dst = dst; i->Pin.Dfp64Cmp.srcL = srcL; @@ -1202,7 +1202,7 @@ PPCInstr* PPCInstr_Dfp64Cmp (/* UInt crfD,*/ HReg dst, HReg srcL, HReg srcR ) { } PPCInstr* PPCInstr_Dfp128Cmp ( HReg dst, HReg srcL_hi, HReg srcL_lo, HReg srcR_hi, HReg srcR_lo ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_Dfp128Cmp; i->Pin.Dfp128Cmp.dst = dst; i->Pin.Dfp128Cmp.srcL_hi = srcL_hi; @@ -1213,14 +1213,14 @@ PPCInstr* PPCInstr_Dfp128Cmp ( HReg dst, HReg srcL_hi, HReg srcL_lo, } PPCInstr* PPCInstr_EvCheck ( PPCAMode* amCounter, PPCAMode* amFailAddr ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_EvCheck; i->Pin.EvCheck.amCounter = amCounter; i->Pin.EvCheck.amFailAddr = amFailAddr; return i; } PPCInstr* PPCInstr_ProfInc ( void ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_ProfInc; return i; } @@ -1275,7 +1275,7 @@ PPCInstr* PPCInstr_FpCftI ( Bool fromI, Bool int32, Bool syned, default: vpanic("PPCInstr_FpCftI(ppc_host)"); } - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpCftI; i->Pin.FpCftI.fromI = fromI; i->Pin.FpCftI.int32 = int32; @@ -1286,7 +1286,7 @@ PPCInstr* PPCInstr_FpCftI ( Bool fromI, Bool int32, Bool syned, return i; } PPCInstr* PPCInstr_FpCMov ( PPCCondCode cond, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpCMov; i->Pin.FpCMov.cond = cond; i->Pin.FpCMov.dst = dst; @@ -1295,14 +1295,14 @@ PPCInstr* PPCInstr_FpCMov ( PPCCondCode cond, HReg dst, HReg src ) { return i; } PPCInstr* PPCInstr_FpLdFPSCR ( HReg src, Bool dfp_rm ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpLdFPSCR; i->Pin.FpLdFPSCR.src = src; i->Pin.FpLdFPSCR.dfp_rm = dfp_rm ? 
1 : 0; return i; } PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_FpCmp; i->Pin.FpCmp.dst = dst; i->Pin.FpCmp.srcL = srcL; @@ -1312,7 +1312,7 @@ PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) { /* Read/Write Link Register */ PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_RdWrLR; i->Pin.RdWrLR.wrLR = wrLR; i->Pin.RdWrLR.gpr = gpr; @@ -1322,7 +1322,7 @@ PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ) { /* AltiVec */ PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz, HReg reg, PPCAMode* addr ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvLdSt; i->Pin.AvLdSt.isLoad = isLoad; i->Pin.AvLdSt.sz = sz; @@ -1331,7 +1331,7 @@ PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz, return i; } PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvUnary; i->Pin.AvUnary.op = op; i->Pin.AvUnary.dst = dst; @@ -1340,7 +1340,7 @@ PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ) { } PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBinary; i->Pin.AvBinary.op = op; i->Pin.AvBinary.dst = dst; @@ -1350,7 +1350,7 @@ PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst, } PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBin8x16; i->Pin.AvBin8x16.op = op; i->Pin.AvBin8x16.dst = dst; @@ -1360,7 +1360,7 @@ PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst, } PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBin16x8; i->Pin.AvBin16x8.op = op; i->Pin.AvBin16x8.dst = dst; @@ -1370,7 +1370,7 @@ PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst, } PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBin32x4; i->Pin.AvBin32x4.op = op; i->Pin.AvBin32x4.dst = dst; @@ -1380,7 +1380,7 @@ PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst, } PPCInstr* PPCInstr_AvBin64x2 ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBin64x2; i->Pin.AvBin64x2.op = op; i->Pin.AvBin64x2.dst = dst; @@ -1391,7 +1391,7 @@ PPCInstr* PPCInstr_AvBin64x2 ( PPCAvOp op, HReg dst, PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvFpOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBin32Fx4; i->Pin.AvBin32Fx4.op = op; i->Pin.AvBin32Fx4.dst = dst; @@ -1400,7 +1400,7 @@ PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvFpOp op, HReg dst, return i; } PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvFpOp op, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); 
i->tag = Pin_AvUn32Fx4; i->Pin.AvUn32Fx4.op = op; i->Pin.AvUn32Fx4.dst = dst; @@ -1408,7 +1408,7 @@ PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvFpOp op, HReg dst, HReg src ) { return i; } PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvPerm; i->Pin.AvPerm.dst = dst; i->Pin.AvPerm.srcL = srcL; @@ -1418,7 +1418,7 @@ PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) { } PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvSel; i->Pin.AvSel.ctl = ctl; i->Pin.AvSel.dst = dst; @@ -1427,7 +1427,7 @@ PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) { return i; } PPCInstr* PPCInstr_AvSh ( Bool shLeft, HReg dst, PPCAMode* addr ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvSh; i->Pin.AvSh.shLeft = shLeft; i->Pin.AvSh.dst = dst; @@ -1436,7 +1436,7 @@ PPCInstr* PPCInstr_AvSh ( Bool shLeft, HReg dst, PPCAMode* addr ) { } PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvShlDbl; i->Pin.AvShlDbl.shift = shift; i->Pin.AvShlDbl.dst = dst; @@ -1445,7 +1445,7 @@ PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst, return i; } PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvSplat; i->Pin.AvSplat.sz = sz; i->Pin.AvSplat.dst = dst; @@ -1453,7 +1453,7 @@ PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ) { return i; } PPCInstr* PPCInstr_AvCMov ( PPCCondCode cond, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvCMov; i->Pin.AvCMov.cond = cond; i->Pin.AvCMov.dst = dst; @@ -1462,13 +1462,13 @@ PPCInstr* PPCInstr_AvCMov ( PPCCondCode cond, HReg dst, HReg src ) { return i; } PPCInstr* PPCInstr_AvLdVSCR ( HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvLdVSCR; i->Pin.AvLdVSCR.src = src; return i; } PPCInstr* PPCInstr_AvCipherV128Unary ( PPCAvOp op, HReg dst, HReg src ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvCipherV128Unary; i->Pin.AvCipherV128Unary.op = op; i->Pin.AvCipherV128Unary.dst = dst; @@ -1477,7 +1477,7 @@ PPCInstr* PPCInstr_AvCipherV128Unary ( PPCAvOp op, HReg dst, HReg src ) { } PPCInstr* PPCInstr_AvCipherV128Binary ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvCipherV128Binary; i->Pin.AvCipherV128Binary.op = op; i->Pin.AvCipherV128Binary.dst = dst; @@ -1487,7 +1487,7 @@ PPCInstr* PPCInstr_AvCipherV128Binary ( PPCAvOp op, HReg dst, } PPCInstr* PPCInstr_AvHashV128Binary ( PPCAvOp op, HReg dst, HReg src, PPCRI* s_field ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvHashV128Binary; i->Pin.AvHashV128Binary.op = op; i->Pin.AvHashV128Binary.dst = dst; @@ -1497,7 +1497,7 @@ PPCInstr* 
PPCInstr_AvHashV128Binary ( PPCAvOp op, HReg dst, } PPCInstr* PPCInstr_AvBCDV128Trinary ( PPCAvOp op, HReg dst, HReg src1, HReg src2, PPCRI* ps ) { - PPCInstr* i = LibVEX_Alloc(sizeof(PPCInstr)); + PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr)); i->tag = Pin_AvBCDV128Trinary; i->Pin.AvBCDV128Trinary.op = op; i->Pin.AvBCDV128Trinary.dst = dst; diff --git a/VEX/priv/host_ppc_isel.c b/VEX/priv/host_ppc_isel.c index b9f02c0435..71e3d39572 100644 --- a/VEX/priv/host_ppc_isel.c +++ b/VEX/priv/host_ppc_isel.c @@ -6144,7 +6144,7 @@ HInstrArray* iselSB_PPC ( const IRSB* bb, IEndianess = Iend_LE; /* Make up an initial environment to use. */ - env = LibVEX_Alloc(sizeof(ISelEnv)); + env = LibVEX_Alloc_inline(sizeof(ISelEnv)); env->vreg_ctr = 0; /* Are we being ppc32 or ppc64? */ @@ -6163,14 +6163,14 @@ HInstrArray* iselSB_PPC ( const IRSB* bb, * for supporting I128 in 32-bit mode */ env->n_vregmap = bb->tyenv->types_used; - env->vregmapLo = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); - env->vregmapMedLo = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); + env->vregmapLo = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); + env->vregmapMedLo = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); if (mode64) { env->vregmapMedHi = NULL; env->vregmapHi = NULL; } else { - env->vregmapMedHi = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); - env->vregmapHi = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); + env->vregmapMedHi = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); + env->vregmapHi = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); } /* and finally ... */ diff --git a/VEX/priv/host_s390_defs.c b/VEX/priv/host_s390_defs.c index ea2f718189..22550c7257 100644 --- a/VEX/priv/host_s390_defs.c +++ b/VEX/priv/host_s390_defs.c @@ -126,7 +126,7 @@ s390_hreg_get_allocable(Int *nregs, HReg **arr) + 16 /* FPRs */ ; - *arr = LibVEX_Alloc(*nregs * sizeof(HReg)); + *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg)); i = 0; @@ -205,7 +205,7 @@ fits_unsigned_12bit(Int value) s390_amode * s390_amode_b12(Int d, HReg b) { - s390_amode *am = LibVEX_Alloc(sizeof(s390_amode)); + s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode)); vassert(fits_unsigned_12bit(d)); @@ -222,7 +222,7 @@ s390_amode_b12(Int d, HReg b) s390_amode * s390_amode_b20(Int d, HReg b) { - s390_amode *am = LibVEX_Alloc(sizeof(s390_amode)); + s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode)); vassert(fits_signed_20bit(d)); @@ -239,7 +239,7 @@ s390_amode_b20(Int d, HReg b) s390_amode * s390_amode_bx12(Int d, HReg b, HReg x) { - s390_amode *am = LibVEX_Alloc(sizeof(s390_amode)); + s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode)); vassert(fits_unsigned_12bit(d)); vassert(hregNumber(b) != 0); @@ -258,7 +258,7 @@ s390_amode_bx12(Int d, HReg b, HReg x) s390_amode * s390_amode_bx20(Int d, HReg b, HReg x) { - s390_amode *am = LibVEX_Alloc(sizeof(s390_amode)); + s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode)); vassert(fits_signed_20bit(d)); vassert(hregNumber(b) != 0); @@ -5206,7 +5206,7 @@ s390_emit_LDGRw(UChar *p, UChar r1, UChar r2) s390_insn * s390_insn_load(UChar size, HReg dst, s390_amode *src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_LOAD; insn->size = size; @@ -5222,7 +5222,7 @@ s390_insn_load(UChar size, HReg dst, s390_amode *src) s390_insn * s390_insn_store(UChar size, s390_amode *dst, HReg src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = 
S390_INSN_STORE; insn->size = size; @@ -5238,7 +5238,7 @@ s390_insn_store(UChar size, s390_amode *dst, HReg src) s390_insn * s390_insn_move(UChar size, HReg dst, HReg src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_MOVE; insn->size = size; @@ -5254,7 +5254,7 @@ s390_insn_move(UChar size, HReg dst, HReg src) s390_insn * s390_insn_memcpy(UChar size, s390_amode *dst, s390_amode *src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); /* This insn will be mapped to MVC which requires base register plus 12-bit displacement */ @@ -5275,7 +5275,7 @@ s390_insn_memcpy(UChar size, s390_amode *dst, s390_amode *src) s390_insn * s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst, s390_opnd_RMI src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_COND_MOVE; insn->size = size; @@ -5292,7 +5292,7 @@ s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst, s390_opnd_RMI src) s390_insn * s390_insn_load_immediate(UChar size, HReg dst, ULong value) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_LOAD_IMMEDIATE; insn->size = size; @@ -5306,7 +5306,7 @@ s390_insn_load_immediate(UChar size, HReg dst, ULong value) s390_insn * s390_insn_alu(UChar size, s390_alu_t tag, HReg dst, s390_opnd_RMI op2) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_ALU; insn->size = size; @@ -5322,7 +5322,7 @@ s390_insn * s390_insn_mul(UChar size, HReg dst_hi, HReg dst_lo, s390_opnd_RMI op2, Bool signed_multiply) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(! hregIsVirtual(dst_hi)); vassert(! hregIsVirtual(dst_lo)); @@ -5341,7 +5341,7 @@ s390_insn * s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo, s390_opnd_RMI op2, Bool signed_divide) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); vassert(! hregIsVirtual(op1_hi)); @@ -5360,7 +5360,7 @@ s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo, s390_opnd_RMI op2, s390_insn * s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 8); vassert(! hregIsVirtual(op1)); @@ -5379,7 +5379,7 @@ s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2) s390_insn * s390_insn_clz(UChar size, HReg num_bits, HReg clobber, s390_opnd_RMI src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 8); vassert(! 
hregIsVirtual(num_bits)); @@ -5398,7 +5398,7 @@ s390_insn_clz(UChar size, HReg num_bits, HReg clobber, s390_opnd_RMI src) s390_insn * s390_insn_unop(UChar size, s390_unop_t tag, HReg dst, s390_opnd_RMI opnd) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_UNOP; insn->size = size; @@ -5413,7 +5413,7 @@ s390_insn_unop(UChar size, s390_unop_t tag, HReg dst, s390_opnd_RMI opnd) s390_insn * s390_insn_test(UChar size, s390_opnd_RMI src) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5428,7 +5428,7 @@ s390_insn_test(UChar size, s390_opnd_RMI src) s390_insn * s390_insn_cc2bool(HReg dst, s390_cc_t cond) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_CC2BOOL; insn->size = 0; /* does not matter */ @@ -5442,7 +5442,7 @@ s390_insn_cc2bool(HReg dst, s390_cc_t cond) s390_insn * s390_insn_cas(UChar size, HReg op1, s390_amode *op2, HReg op3, HReg old_mem) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); vassert(hregNumber(op2->x) == 0); @@ -5464,8 +5464,8 @@ s390_insn_cdas(UChar size, HReg op1_high, HReg op1_low, s390_amode *op2, HReg op3_high, HReg op3_low, HReg old_mem_high, HReg old_mem_low, HReg scratch) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); - s390_cdas *cdas = LibVEX_Alloc(sizeof(s390_cdas)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); + s390_cdas *cdas = LibVEX_Alloc_inline(sizeof(s390_cdas)); vassert(size == 4 || size == 8); vassert(hregNumber(op2->x) == 0); @@ -5493,7 +5493,7 @@ s390_insn * s390_insn_compare(UChar size, HReg src1, s390_opnd_RMI src2, Bool signed_comparison) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5511,8 +5511,8 @@ s390_insn * s390_insn_helper_call(s390_cc_t cond, Addr64 target, UInt num_args, const HChar *name, RetLoc rloc) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); - s390_helper_call *helper_call = LibVEX_Alloc(sizeof(s390_helper_call)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); + s390_helper_call *helper_call = LibVEX_Alloc_inline(sizeof(s390_helper_call)); insn->tag = S390_INSN_HELPER_CALL; insn->size = 0; /* does not matter */ @@ -5534,7 +5534,7 @@ s390_insn * s390_insn_bfp_triop(UChar size, s390_bfp_triop_t tag, HReg dst, HReg op2, HReg op3) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5552,7 +5552,7 @@ s390_insn_bfp_triop(UChar size, s390_bfp_triop_t tag, HReg dst, HReg op2, s390_insn * s390_insn_bfp_binop(UChar size, s390_bfp_binop_t tag, HReg dst, HReg op2) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5571,7 +5571,7 @@ s390_insn_bfp_binop(UChar size, s390_bfp_binop_t tag, HReg dst, HReg op2) s390_insn * s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst, HReg op) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5590,7 +5590,7 @@ s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst, HReg op) 
s390_insn * s390_insn_bfp_compare(UChar size, HReg dst, HReg op1, HReg op2) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5610,7 +5610,7 @@ s390_insn * s390_insn_bfp_convert(UChar size, s390_bfp_conv_t tag, HReg dst, HReg op, s390_bfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5646,7 +5646,7 @@ s390_insn * s390_insn_bfp128_binop(UChar size, s390_bfp_binop_t tag, HReg dst_hi, HReg dst_lo, HReg op2_hi, HReg op2_lo) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 16); vassert(is_valid_fp128_regpair(dst_hi, dst_lo)); @@ -5668,7 +5668,7 @@ s390_insn * s390_insn_bfp128_unop(UChar size, s390_bfp_unop_t tag, HReg dst_hi, HReg dst_lo, HReg op_hi, HReg op_lo) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 16); vassert(is_valid_fp128_regpair(dst_hi, dst_lo)); @@ -5690,7 +5690,7 @@ s390_insn * s390_insn_bfp128_compare(UChar size, HReg dst, HReg op1_hi, HReg op1_lo, HReg op2_hi, HReg op2_lo) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 16); vassert(is_valid_fp128_regpair(op1_hi, op1_lo)); @@ -5713,7 +5713,7 @@ s390_insn_bfp128_convert(UChar size, s390_bfp_conv_t tag, HReg dst_hi, HReg dst_lo, HReg op_hi, HReg op_lo, s390_bfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); if (size == 16) { /* From smaller size to 16 bytes */ @@ -5764,8 +5764,8 @@ s390_insn * s390_insn_dfp_binop(UChar size, s390_dfp_binop_t tag, HReg dst, HReg op2, HReg op3, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); - s390_dfp_binop *dfp_binop = LibVEX_Alloc(sizeof(s390_dfp_binop)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); + s390_dfp_binop *dfp_binop = LibVEX_Alloc_inline(sizeof(s390_dfp_binop)); vassert(size == 8); @@ -5789,7 +5789,7 @@ s390_insn_dfp_binop(UChar size, s390_dfp_binop_t tag, HReg dst, HReg op2, s390_insn * s390_insn_dfp_unop(UChar size, s390_dfp_unop_t tag, HReg dst, HReg op) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 8); @@ -5809,7 +5809,7 @@ s390_insn * s390_insn_dfp_intop(UChar size, s390_dfp_intop_t tag, HReg dst, HReg op2, HReg op3) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 8); @@ -5830,7 +5830,7 @@ s390_insn * s390_insn_dfp_compare(UChar size, s390_dfp_cmp_t tag, HReg dst, HReg op1, HReg op2) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 8); @@ -5851,7 +5851,7 @@ s390_insn * s390_insn_dfp_convert(UChar size, s390_dfp_conv_t tag, HReg dst, HReg op, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -5872,7 +5872,7 @@ s390_insn * s390_insn_dfp_reround(UChar size, HReg dst, HReg op2, HReg op3, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = 
LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 8); @@ -5893,8 +5893,8 @@ s390_insn * s390_insn_fp_convert(UChar size, s390_fp_conv_t tag, HReg dst, HReg op, HReg r1, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); - s390_fp_convert *fp_convert = LibVEX_Alloc(sizeof(s390_fp_convert)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); + s390_fp_convert *fp_convert = LibVEX_Alloc_inline(sizeof(s390_fp_convert)); vassert(size == 4 || size == 8); @@ -5919,8 +5919,8 @@ s390_insn_fp128_convert(UChar size, s390_fp_conv_t tag, HReg dst_hi, HReg dst_lo, HReg op_hi, HReg op_lo, HReg r1, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); - s390_fp_convert *fp_convert = LibVEX_Alloc(sizeof(s390_fp_convert)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); + s390_fp_convert *fp_convert = LibVEX_Alloc_inline(sizeof(s390_fp_convert)); vassert(size == 16); @@ -5945,8 +5945,8 @@ s390_insn_dfp128_binop(UChar size, s390_dfp_binop_t tag, HReg dst_hi, HReg dst_lo, HReg op2_hi, HReg op2_lo, HReg op3_hi, HReg op3_lo, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); - s390_dfp_binop *dfp_binop = LibVEX_Alloc(sizeof(s390_dfp_binop)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); + s390_dfp_binop *dfp_binop = LibVEX_Alloc_inline(sizeof(s390_dfp_binop)); vassert(size == 16); vassert(is_valid_fp128_regpair(dst_hi, dst_lo)); @@ -5974,7 +5974,7 @@ s390_insn * s390_insn_dfp128_unop(UChar size, s390_dfp_unop_t tag, HReg dst, HReg op_hi, HReg op_lo) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); /* destination is an 8 byte integer value */ vassert(size == 8); @@ -5996,7 +5996,7 @@ s390_insn * s390_insn_dfp128_intop(UChar size, s390_dfp_intop_t tag, HReg dst_hi, HReg dst_lo, HReg op2, HReg op3_hi, HReg op3_lo) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 16); vassert(is_valid_fp128_regpair(dst_hi, dst_lo)); @@ -6019,7 +6019,7 @@ s390_insn * s390_insn_dfp128_compare(UChar size, s390_dfp_cmp_t tag, HReg dst, HReg op1_hi, HReg op1_lo, HReg op2_hi, HReg op2_lo) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 16); vassert(is_valid_fp128_regpair(op1_hi, op1_lo)); @@ -6043,7 +6043,7 @@ s390_insn_dfp128_convert(UChar size, s390_dfp_conv_t tag, HReg dst_hi, HReg dst_lo, HReg op_hi, HReg op_lo, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); if (size == 16) { /* From smaller size to 16 bytes */ @@ -6095,7 +6095,7 @@ s390_insn_dfp128_reround(UChar size, HReg dst_hi, HReg dst_lo, HReg op2, HReg op3_hi, HReg op3_lo, s390_dfp_round_t rounding_mode) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 16); vassert(is_valid_fp128_regpair(dst_hi, dst_lo)); @@ -6117,7 +6117,7 @@ s390_insn_dfp128_reround(UChar size, HReg dst_hi, HReg dst_lo, HReg op2, s390_insn * s390_insn_mfence(void) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_MFENCE; insn->size = 0; /* not needed */ @@ -6129,7 +6129,7 @@ s390_insn_mfence(void) s390_insn * s390_insn_mimm(UChar size, s390_amode *dst, ULong value) { - 
s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); /* This insn will be mapped to insns that require base register plus 12-bit displacement */ @@ -6147,7 +6147,7 @@ s390_insn_mimm(UChar size, s390_amode *dst, ULong value) s390_insn * s390_insn_madd(UChar size, s390_amode *dst, UChar delta, ULong value) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(size == 4 || size == 8); @@ -6172,7 +6172,7 @@ s390_insn_set_fpc_bfprm(UChar size, HReg mode) { vassert(size == 4); - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_SET_FPC_BFPRM; insn->size = size; @@ -6187,7 +6187,7 @@ s390_insn_set_fpc_dfprm(UChar size, HReg mode) { vassert(size == 4); - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_SET_FPC_DFPRM; insn->size = size; @@ -6201,7 +6201,7 @@ s390_insn * s390_insn_xdirect(s390_cc_t cond, Addr64 dst, s390_amode *guest_IA, Bool to_fast_entry) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(guest_IA->tag == S390_AMODE_B12); @@ -6220,7 +6220,7 @@ s390_insn_xdirect(s390_cc_t cond, Addr64 dst, s390_amode *guest_IA, s390_insn * s390_insn_xindir(s390_cc_t cond, HReg dst, s390_amode *guest_IA) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(guest_IA->tag == S390_AMODE_B12); @@ -6239,7 +6239,7 @@ s390_insn * s390_insn_xassisted(s390_cc_t cond, HReg dst, s390_amode *guest_IA, IRJumpKind kind) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(guest_IA->tag == S390_AMODE_B12); @@ -6258,7 +6258,7 @@ s390_insn_xassisted(s390_cc_t cond, HReg dst, s390_amode *guest_IA, s390_insn * s390_insn_evcheck(s390_amode *counter, s390_amode *fail_addr) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); vassert(counter->tag == S390_AMODE_B12); vassert(fail_addr->tag == S390_AMODE_B12); @@ -6276,7 +6276,7 @@ s390_insn_evcheck(s390_amode *counter, s390_amode *fail_addr) s390_insn * s390_insn_profinc(void) { - s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn)); + s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn)); insn->tag = S390_INSN_PROFINC; insn->size = 0; /* does not matter */ diff --git a/VEX/priv/host_s390_isel.c b/VEX/priv/host_s390_isel.c index f7af2a934c..436e9e1ea3 100644 --- a/VEX/priv/host_s390_isel.c +++ b/VEX/priv/host_s390_isel.c @@ -4065,7 +4065,7 @@ iselSB_S390(const IRSB *bb, VexArch arch_host, const VexArchInfo *archinfo_host, vassert(archinfo_host->endness == VexEndnessBE); /* Make up an initial environment to use. */ - env = LibVEX_Alloc(sizeof(ISelEnv)); + env = LibVEX_Alloc_inline(sizeof(ISelEnv)); env->vreg_ctr = 0; /* Set up output code array. 
*/ @@ -4087,8 +4087,8 @@ iselSB_S390(const IRSB *bb, VexArch arch_host, const VexArchInfo *archinfo_host, vassert(bb->tyenv->types_used >= 0); env->n_vregmap = bb->tyenv->types_used; - env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); - env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); + env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); + env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); env->previous_bfp_rounding_mode = NULL; env->previous_dfp_rounding_mode = NULL; diff --git a/VEX/priv/host_x86_defs.c b/VEX/priv/host_x86_defs.c index a059b7875b..1d6fec9d10 100644 --- a/VEX/priv/host_x86_defs.c +++ b/VEX/priv/host_x86_defs.c @@ -105,7 +105,7 @@ HReg hregX86_XMM7 ( void ) { return mkHReg(7, HRcVec128, False); } void getAllocableRegs_X86 ( Int* nregs, HReg** arr ) { *nregs = 20; - *arr = LibVEX_Alloc(*nregs * sizeof(HReg)); + *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg)); (*arr)[0] = hregX86_EAX(); (*arr)[1] = hregX86_EBX(); (*arr)[2] = hregX86_ECX(); @@ -159,14 +159,14 @@ const HChar* showX86CondCode ( X86CondCode cond ) /* --------- X86AMode: memory address expressions. --------- */ X86AMode* X86AMode_IR ( UInt imm32, HReg reg ) { - X86AMode* am = LibVEX_Alloc(sizeof(X86AMode)); + X86AMode* am = LibVEX_Alloc_inline(sizeof(X86AMode)); am->tag = Xam_IR; am->Xam.IR.imm = imm32; am->Xam.IR.reg = reg; return am; } X86AMode* X86AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) { - X86AMode* am = LibVEX_Alloc(sizeof(X86AMode)); + X86AMode* am = LibVEX_Alloc_inline(sizeof(X86AMode)); am->tag = Xam_IRRS; am->Xam.IRRS.imm = imm32; am->Xam.IRRS.base = base; @@ -241,19 +241,19 @@ static void mapRegs_X86AMode ( HRegRemap* m, X86AMode* am ) { /* --------- Operand, which can be reg, immediate or memory. --------- */ X86RMI* X86RMI_Imm ( UInt imm32 ) { - X86RMI* op = LibVEX_Alloc(sizeof(X86RMI)); + X86RMI* op = LibVEX_Alloc_inline(sizeof(X86RMI)); op->tag = Xrmi_Imm; op->Xrmi.Imm.imm32 = imm32; return op; } X86RMI* X86RMI_Reg ( HReg reg ) { - X86RMI* op = LibVEX_Alloc(sizeof(X86RMI)); + X86RMI* op = LibVEX_Alloc_inline(sizeof(X86RMI)); op->tag = Xrmi_Reg; op->Xrmi.Reg.reg = reg; return op; } X86RMI* X86RMI_Mem ( X86AMode* am ) { - X86RMI* op = LibVEX_Alloc(sizeof(X86RMI)); + X86RMI* op = LibVEX_Alloc_inline(sizeof(X86RMI)); op->tag = Xrmi_Mem; op->Xrmi.Mem.am = am; return op; @@ -312,13 +312,13 @@ static void mapRegs_X86RMI ( HRegRemap* m, X86RMI* op ) { /* --------- Operand, which can be reg or immediate only. --------- */ X86RI* X86RI_Imm ( UInt imm32 ) { - X86RI* op = LibVEX_Alloc(sizeof(X86RI)); + X86RI* op = LibVEX_Alloc_inline(sizeof(X86RI)); op->tag = Xri_Imm; op->Xri.Imm.imm32 = imm32; return op; } X86RI* X86RI_Reg ( HReg reg ) { - X86RI* op = LibVEX_Alloc(sizeof(X86RI)); + X86RI* op = LibVEX_Alloc_inline(sizeof(X86RI)); op->tag = Xri_Reg; op->Xri.Reg.reg = reg; return op; @@ -368,13 +368,13 @@ static void mapRegs_X86RI ( HRegRemap* m, X86RI* op ) { /* --------- Operand, which can be reg or memory only. 
--------- */ X86RM* X86RM_Reg ( HReg reg ) { - X86RM* op = LibVEX_Alloc(sizeof(X86RM)); + X86RM* op = LibVEX_Alloc_inline(sizeof(X86RM)); op->tag = Xrm_Reg; op->Xrm.Reg.reg = reg; return op; } X86RM* X86RM_Mem ( X86AMode* am ) { - X86RM* op = LibVEX_Alloc(sizeof(X86RM)); + X86RM* op = LibVEX_Alloc_inline(sizeof(X86RM)); op->tag = Xrm_Mem; op->Xrm.Mem.am = am; return op; @@ -563,7 +563,7 @@ const HChar* showX86SseOp ( X86SseOp op ) { } X86Instr* X86Instr_Alu32R ( X86AluOp op, X86RMI* src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Alu32R; i->Xin.Alu32R.op = op; i->Xin.Alu32R.src = src; @@ -571,7 +571,7 @@ X86Instr* X86Instr_Alu32R ( X86AluOp op, X86RMI* src, HReg dst ) { return i; } X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Alu32M; i->Xin.Alu32M.op = op; i->Xin.Alu32M.src = src; @@ -580,7 +580,7 @@ X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) { return i; } X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Sh32; i->Xin.Sh32.op = op; i->Xin.Sh32.src = src; @@ -588,42 +588,42 @@ X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, HReg dst ) { return i; } X86Instr* X86Instr_Test32 ( UInt imm32, X86RM* dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Test32; i->Xin.Test32.imm32 = imm32; i->Xin.Test32.dst = dst; return i; } X86Instr* X86Instr_Unary32 ( X86UnaryOp op, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Unary32; i->Xin.Unary32.op = op; i->Xin.Unary32.dst = dst; return i; } X86Instr* X86Instr_Lea32 ( X86AMode* am, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Lea32; i->Xin.Lea32.am = am; i->Xin.Lea32.dst = dst; return i; } X86Instr* X86Instr_MulL ( Bool syned, X86RM* src ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_MulL; i->Xin.MulL.syned = syned; i->Xin.MulL.src = src; return i; } X86Instr* X86Instr_Div ( Bool syned, X86RM* src ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Div; i->Xin.Div.syned = syned; i->Xin.Div.src = src; return i; } X86Instr* X86Instr_Sh3232 ( X86ShiftOp op, UInt amt, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Sh3232; i->Xin.Sh3232.op = op; i->Xin.Sh3232.amt = amt; @@ -633,14 +633,14 @@ X86Instr* X86Instr_Sh3232 ( X86ShiftOp op, UInt amt, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_Push( X86RMI* src ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Push; i->Xin.Push.src = src; return i; } X86Instr* X86Instr_Call ( X86CondCode cond, Addr32 target, Int regparms, RetLoc rloc ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Call; i->Xin.Call.cond = cond; i->Xin.Call.target = target; @@ -652,7 +652,7 @@ X86Instr* X86Instr_Call ( X86CondCode cond, Addr32 target, Int 
regparms, } X86Instr* X86Instr_XDirect ( Addr32 dstGA, X86AMode* amEIP, X86CondCode cond, Bool toFastEP ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_XDirect; i->Xin.XDirect.dstGA = dstGA; i->Xin.XDirect.amEIP = amEIP; @@ -662,7 +662,7 @@ X86Instr* X86Instr_XDirect ( Addr32 dstGA, X86AMode* amEIP, } X86Instr* X86Instr_XIndir ( HReg dstGA, X86AMode* amEIP, X86CondCode cond ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_XIndir; i->Xin.XIndir.dstGA = dstGA; i->Xin.XIndir.amEIP = amEIP; @@ -671,7 +671,7 @@ X86Instr* X86Instr_XIndir ( HReg dstGA, X86AMode* amEIP, } X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP, X86CondCode cond, IRJumpKind jk ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_XAssisted; i->Xin.XAssisted.dstGA = dstGA; i->Xin.XAssisted.amEIP = amEIP; @@ -680,7 +680,7 @@ X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP, return i; } X86Instr* X86Instr_CMov32 ( X86CondCode cond, X86RM* src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_CMov32; i->Xin.CMov32.cond = cond; i->Xin.CMov32.src = src; @@ -690,7 +690,7 @@ X86Instr* X86Instr_CMov32 ( X86CondCode cond, X86RM* src, HReg dst ) { } X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned, X86AMode* src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_LoadEX; i->Xin.LoadEX.szSmall = szSmall; i->Xin.LoadEX.syned = syned; @@ -700,7 +700,7 @@ X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned, return i; } X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Store; i->Xin.Store.sz = sz; i->Xin.Store.src = src; @@ -709,14 +709,14 @@ X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst ) { return i; } X86Instr* X86Instr_Set32 ( X86CondCode cond, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Set32; i->Xin.Set32.cond = cond; i->Xin.Set32.dst = dst; return i; } X86Instr* X86Instr_Bsfr32 ( Bool isFwds, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Bsfr32; i->Xin.Bsfr32.isFwds = isFwds; i->Xin.Bsfr32.src = src; @@ -724,7 +724,7 @@ X86Instr* X86Instr_Bsfr32 ( Bool isFwds, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_MFence ( UInt hwcaps ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_MFence; i->Xin.MFence.hwcaps = hwcaps; vassert(0 == (hwcaps & ~(VEX_HWCAPS_X86_MMXEXT @@ -735,7 +735,7 @@ X86Instr* X86Instr_MFence ( UInt hwcaps ) { return i; } X86Instr* X86Instr_ACAS ( X86AMode* addr, UChar sz ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_ACAS; i->Xin.ACAS.addr = addr; i->Xin.ACAS.sz = sz; @@ -743,14 +743,14 @@ X86Instr* X86Instr_ACAS ( X86AMode* addr, UChar sz ) { return i; } X86Instr* X86Instr_DACAS ( X86AMode* addr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_DACAS; i->Xin.DACAS.addr = addr; return i; } 
X86Instr* X86Instr_FpUnary ( X86FpOp op, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpUnary; i->Xin.FpUnary.op = op; i->Xin.FpUnary.src = src; @@ -758,7 +758,7 @@ X86Instr* X86Instr_FpUnary ( X86FpOp op, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_FpBinary ( X86FpOp op, HReg srcL, HReg srcR, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpBinary; i->Xin.FpBinary.op = op; i->Xin.FpBinary.srcL = srcL; @@ -767,7 +767,7 @@ X86Instr* X86Instr_FpBinary ( X86FpOp op, HReg srcL, HReg srcR, HReg dst ) { return i; } X86Instr* X86Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, X86AMode* addr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpLdSt; i->Xin.FpLdSt.isLoad = isLoad; i->Xin.FpLdSt.sz = sz; @@ -778,7 +778,7 @@ X86Instr* X86Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, X86AMode* addr ) { } X86Instr* X86Instr_FpLdStI ( Bool isLoad, UChar sz, HReg reg, X86AMode* addr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpLdStI; i->Xin.FpLdStI.isLoad = isLoad; i->Xin.FpLdStI.sz = sz; @@ -788,14 +788,14 @@ X86Instr* X86Instr_FpLdStI ( Bool isLoad, UChar sz, return i; } X86Instr* X86Instr_Fp64to32 ( HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Fp64to32; i->Xin.Fp64to32.src = src; i->Xin.Fp64to32.dst = dst; return i; } X86Instr* X86Instr_FpCMov ( X86CondCode cond, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpCMov; i->Xin.FpCMov.cond = cond; i->Xin.FpCMov.src = src; @@ -804,18 +804,18 @@ X86Instr* X86Instr_FpCMov ( X86CondCode cond, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_FpLdCW ( X86AMode* addr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpLdCW; i->Xin.FpLdCW.addr = addr; return i; } X86Instr* X86Instr_FpStSW_AX ( void ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpStSW_AX; return i; } X86Instr* X86Instr_FpCmp ( HReg srcL, HReg srcR, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_FpCmp; i->Xin.FpCmp.srcL = srcL; i->Xin.FpCmp.srcR = srcR; @@ -823,7 +823,7 @@ X86Instr* X86Instr_FpCmp ( HReg srcL, HReg srcR, HReg dst ) { return i; } X86Instr* X86Instr_SseConst ( UShort con, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_SseConst; i->Xin.SseConst.con = con; i->Xin.SseConst.dst = dst; @@ -831,7 +831,7 @@ X86Instr* X86Instr_SseConst ( UShort con, HReg dst ) { return i; } X86Instr* X86Instr_SseLdSt ( Bool isLoad, HReg reg, X86AMode* addr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_SseLdSt; i->Xin.SseLdSt.isLoad = isLoad; i->Xin.SseLdSt.reg = reg; @@ -840,7 +840,7 @@ X86Instr* X86Instr_SseLdSt ( Bool isLoad, HReg reg, X86AMode* addr ) { } X86Instr* X86Instr_SseLdzLO ( Int sz, HReg reg, X86AMode* addr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = 
LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_SseLdzLO; i->Xin.SseLdzLO.sz = toUChar(sz); i->Xin.SseLdzLO.reg = reg; @@ -849,7 +849,7 @@ X86Instr* X86Instr_SseLdzLO ( Int sz, HReg reg, X86AMode* addr ) return i; } X86Instr* X86Instr_Sse32Fx4 ( X86SseOp op, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Sse32Fx4; i->Xin.Sse32Fx4.op = op; i->Xin.Sse32Fx4.src = src; @@ -858,7 +858,7 @@ X86Instr* X86Instr_Sse32Fx4 ( X86SseOp op, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_Sse32FLo ( X86SseOp op, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Sse32FLo; i->Xin.Sse32FLo.op = op; i->Xin.Sse32FLo.src = src; @@ -867,7 +867,7 @@ X86Instr* X86Instr_Sse32FLo ( X86SseOp op, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_Sse64Fx2 ( X86SseOp op, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Sse64Fx2; i->Xin.Sse64Fx2.op = op; i->Xin.Sse64Fx2.src = src; @@ -876,7 +876,7 @@ X86Instr* X86Instr_Sse64Fx2 ( X86SseOp op, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_Sse64FLo ( X86SseOp op, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_Sse64FLo; i->Xin.Sse64FLo.op = op; i->Xin.Sse64FLo.src = src; @@ -885,7 +885,7 @@ X86Instr* X86Instr_Sse64FLo ( X86SseOp op, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_SseReRg ( X86SseOp op, HReg re, HReg rg ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_SseReRg; i->Xin.SseReRg.op = op; i->Xin.SseReRg.src = re; @@ -893,7 +893,7 @@ X86Instr* X86Instr_SseReRg ( X86SseOp op, HReg re, HReg rg ) { return i; } X86Instr* X86Instr_SseCMov ( X86CondCode cond, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_SseCMov; i->Xin.SseCMov.cond = cond; i->Xin.SseCMov.src = src; @@ -902,7 +902,7 @@ X86Instr* X86Instr_SseCMov ( X86CondCode cond, HReg src, HReg dst ) { return i; } X86Instr* X86Instr_SseShuf ( Int order, HReg src, HReg dst ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_SseShuf; i->Xin.SseShuf.order = order; i->Xin.SseShuf.src = src; @@ -912,14 +912,14 @@ X86Instr* X86Instr_SseShuf ( Int order, HReg src, HReg dst ) { } X86Instr* X86Instr_EvCheck ( X86AMode* amCounter, X86AMode* amFailAddr ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_EvCheck; i->Xin.EvCheck.amCounter = amCounter; i->Xin.EvCheck.amFailAddr = amFailAddr; return i; } X86Instr* X86Instr_ProfInc ( void ) { - X86Instr* i = LibVEX_Alloc(sizeof(X86Instr)); + X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr)); i->tag = Xin_ProfInc; return i; } diff --git a/VEX/priv/host_x86_isel.c b/VEX/priv/host_x86_isel.c index bfcaed2dc5..8eeb3bf6b2 100644 --- a/VEX/priv/host_x86_isel.c +++ b/VEX/priv/host_x86_isel.c @@ -4438,7 +4438,7 @@ HInstrArray* iselSB_X86 ( const IRSB* bb, vassert(archinfo_host->endness == VexEndnessLE); /* Make up an initial environment to use. */ - env = LibVEX_Alloc(sizeof(ISelEnv)); + env = LibVEX_Alloc_inline(sizeof(ISelEnv)); env->vreg_ctr = 0; /* Set up output code array. 
*/ @@ -4450,8 +4450,8 @@ HInstrArray* iselSB_X86 ( const IRSB* bb, /* Make up an IRTemp -> virtual HReg mapping. This doesn't change as we go along. */ env->n_vregmap = bb->tyenv->types_used; - env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); - env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg)); + env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); + env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg)); /* and finally ... */ env->chainingAllowed = chainingAllowed; diff --git a/VEX/priv/ir_defs.c b/VEX/priv/ir_defs.c index c56095cb58..7cc66cccdb 100644 --- a/VEX/priv/ir_defs.c +++ b/VEX/priv/ir_defs.c @@ -1664,7 +1664,7 @@ void ppIRSB ( const IRSB* bb ) IRConst* IRConst_U1 ( Bool bit ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_U1; c->Ico.U1 = bit; /* call me paranoid; I don't care :-) */ @@ -1673,70 +1673,70 @@ IRConst* IRConst_U1 ( Bool bit ) } IRConst* IRConst_U8 ( UChar u8 ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_U8; c->Ico.U8 = u8; return c; } IRConst* IRConst_U16 ( UShort u16 ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_U16; c->Ico.U16 = u16; return c; } IRConst* IRConst_U32 ( UInt u32 ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_U32; c->Ico.U32 = u32; return c; } IRConst* IRConst_U64 ( ULong u64 ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_U64; c->Ico.U64 = u64; return c; } IRConst* IRConst_F32 ( Float f32 ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_F32; c->Ico.F32 = f32; return c; } IRConst* IRConst_F32i ( UInt f32i ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_F32i; c->Ico.F32i = f32i; return c; } IRConst* IRConst_F64 ( Double f64 ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_F64; c->Ico.F64 = f64; return c; } IRConst* IRConst_F64i ( ULong f64i ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_F64i; c->Ico.F64i = f64i; return c; } IRConst* IRConst_V128 ( UShort con ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_V128; c->Ico.V128 = con; return c; } IRConst* IRConst_V256 ( UInt con ) { - IRConst* c = LibVEX_Alloc(sizeof(IRConst)); + IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst)); c->tag = Ico_V256; c->Ico.V256 = con; return c; @@ -1746,7 +1746,7 @@ IRConst* IRConst_V256 ( UInt con ) IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr ) { - IRCallee* ce = LibVEX_Alloc(sizeof(IRCallee)); + IRCallee* ce = LibVEX_Alloc_inline(sizeof(IRCallee)); ce->regparms = regparms; ce->name = name; ce->addr = addr; @@ -1762,7 +1762,7 @@ IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr ) IRRegArray* mkIRRegArray ( Int base, IRType elemTy, Int nElems ) { - IRRegArray* arr = LibVEX_Alloc(sizeof(IRRegArray)); + IRRegArray* arr = LibVEX_Alloc_inline(sizeof(IRRegArray)); arr->base = base; arr->elemTy = elemTy; arr->nElems = nElems; @@ -1776,20 +1776,20 @@ IRRegArray* mkIRRegArray ( Int base, IRType elemTy, Int nElems ) /* 
Constructors -- IRExpr */ IRExpr* IRExpr_Binder ( Int binder ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_Binder; e->Iex.Binder.binder = binder; return e; } IRExpr* IRExpr_Get ( Int off, IRType ty ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_Get; e->Iex.Get.offset = off; e->Iex.Get.ty = ty; return e; } IRExpr* IRExpr_GetI ( IRRegArray* descr, IRExpr* ix, Int bias ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_GetI; e->Iex.GetI.descr = descr; e->Iex.GetI.ix = ix; @@ -1797,15 +1797,15 @@ IRExpr* IRExpr_GetI ( IRRegArray* descr, IRExpr* ix, Int bias ) { return e; } IRExpr* IRExpr_RdTmp ( IRTemp tmp ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_RdTmp; e->Iex.RdTmp.tmp = tmp; return e; } IRExpr* IRExpr_Qop ( IROp op, IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr* arg4 ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); - IRQop* qop = LibVEX_Alloc(sizeof(IRQop)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); + IRQop* qop = LibVEX_Alloc_inline(sizeof(IRQop)); qop->op = op; qop->arg1 = arg1; qop->arg2 = arg2; @@ -1817,8 +1817,8 @@ IRExpr* IRExpr_Qop ( IROp op, IRExpr* arg1, IRExpr* arg2, } IRExpr* IRExpr_Triop ( IROp op, IRExpr* arg1, IRExpr* arg2, IRExpr* arg3 ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); - IRTriop* triop = LibVEX_Alloc(sizeof(IRTriop)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); + IRTriop* triop = LibVEX_Alloc_inline(sizeof(IRTriop)); triop->op = op; triop->arg1 = arg1; triop->arg2 = arg2; @@ -1828,7 +1828,7 @@ IRExpr* IRExpr_Triop ( IROp op, IRExpr* arg1, return e; } IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_Binop; e->Iex.Binop.op = op; e->Iex.Binop.arg1 = arg1; @@ -1836,14 +1836,14 @@ IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 ) { return e; } IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_Unop; e->Iex.Unop.op = op; e->Iex.Unop.arg = arg; return e; } IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_Load; e->Iex.Load.end = end; e->Iex.Load.ty = ty; @@ -1852,13 +1852,13 @@ IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr ) { return e; } IRExpr* IRExpr_Const ( IRConst* con ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_Const; e->Iex.Const.con = con; return e; } IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_CCall; e->Iex.CCall.cee = cee; e->Iex.CCall.retty = retty; @@ -1866,7 +1866,7 @@ IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args ) { return e; } IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_ITE; e->Iex.ITE.cond = cond; e->Iex.ITE.iftrue = iftrue; @@ -1874,12 +1874,12 @@ IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse ) { return e; } IRExpr* IRExpr_VECRET ( void ) 
{ - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_VECRET; return e; } IRExpr* IRExpr_BBPTR ( void ) { - IRExpr* e = LibVEX_Alloc(sizeof(IRExpr)); + IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr)); e->tag = Iex_BBPTR; return e; } @@ -1889,25 +1889,25 @@ IRExpr* IRExpr_BBPTR ( void ) { suitable for use as arg lists in clean/dirty helper calls. */ IRExpr** mkIRExprVec_0 ( void ) { - IRExpr** vec = LibVEX_Alloc(1 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(1 * sizeof(IRExpr*)); vec[0] = NULL; return vec; } IRExpr** mkIRExprVec_1 ( IRExpr* arg1 ) { - IRExpr** vec = LibVEX_Alloc(2 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(2 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = NULL; return vec; } IRExpr** mkIRExprVec_2 ( IRExpr* arg1, IRExpr* arg2 ) { - IRExpr** vec = LibVEX_Alloc(3 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(3 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = NULL; return vec; } IRExpr** mkIRExprVec_3 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3 ) { - IRExpr** vec = LibVEX_Alloc(4 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(4 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = arg3; @@ -1916,7 +1916,7 @@ IRExpr** mkIRExprVec_3 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3 ) { } IRExpr** mkIRExprVec_4 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr* arg4 ) { - IRExpr** vec = LibVEX_Alloc(5 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(5 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = arg3; @@ -1926,7 +1926,7 @@ IRExpr** mkIRExprVec_4 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, } IRExpr** mkIRExprVec_5 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr* arg4, IRExpr* arg5 ) { - IRExpr** vec = LibVEX_Alloc(6 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(6 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = arg3; @@ -1937,7 +1937,7 @@ IRExpr** mkIRExprVec_5 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, } IRExpr** mkIRExprVec_6 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr* arg4, IRExpr* arg5, IRExpr* arg6 ) { - IRExpr** vec = LibVEX_Alloc(7 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(7 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = arg3; @@ -1950,7 +1950,7 @@ IRExpr** mkIRExprVec_6 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr** mkIRExprVec_7 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr* arg4, IRExpr* arg5, IRExpr* arg6, IRExpr* arg7 ) { - IRExpr** vec = LibVEX_Alloc(8 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(8 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = arg3; @@ -1964,7 +1964,7 @@ IRExpr** mkIRExprVec_7 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr** mkIRExprVec_8 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, IRExpr* arg4, IRExpr* arg5, IRExpr* arg6, IRExpr* arg7, IRExpr* arg8 ) { - IRExpr** vec = LibVEX_Alloc(9 * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline(9 * sizeof(IRExpr*)); vec[0] = arg1; vec[1] = arg2; vec[2] = arg3; @@ -1981,7 +1981,7 @@ IRExpr** mkIRExprVec_8 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3, /* Constructors -- IRDirty */ IRDirty* emptyIRDirty ( void ) { - IRDirty* d = LibVEX_Alloc(sizeof(IRDirty)); + IRDirty* d = LibVEX_Alloc_inline(sizeof(IRDirty)); d->cee = NULL; d->guard = NULL; d->args = NULL; @@ -2000,7 +2000,7 @@ IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo, IREndness end, IRExpr* addr, IRExpr* expdHi, IRExpr* expdLo, IRExpr* dataHi, IRExpr* dataLo ) { - IRCAS* cas = LibVEX_Alloc(sizeof(IRCAS)); + IRCAS* cas = 
LibVEX_Alloc_inline(sizeof(IRCAS)); cas->oldHi = oldHi; cas->oldLo = oldLo; cas->end = end; @@ -2018,7 +2018,7 @@ IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo, IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix, Int bias, IRExpr* data ) { - IRPutI* puti = LibVEX_Alloc(sizeof(IRPutI)); + IRPutI* puti = LibVEX_Alloc_inline(sizeof(IRPutI)); puti->descr = descr; puti->ix = ix; puti->bias = bias; @@ -2032,7 +2032,7 @@ IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix, IRStoreG* mkIRStoreG ( IREndness end, IRExpr* addr, IRExpr* data, IRExpr* guard ) { - IRStoreG* sg = LibVEX_Alloc(sizeof(IRStoreG)); + IRStoreG* sg = LibVEX_Alloc_inline(sizeof(IRStoreG)); sg->end = end; sg->addr = addr; sg->data = data; @@ -2043,7 +2043,7 @@ IRStoreG* mkIRStoreG ( IREndness end, IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt, IRTemp dst, IRExpr* addr, IRExpr* alt, IRExpr* guard ) { - IRLoadG* lg = LibVEX_Alloc(sizeof(IRLoadG)); + IRLoadG* lg = LibVEX_Alloc_inline(sizeof(IRLoadG)); lg->end = end; lg->cvt = cvt; lg->dst = dst; @@ -2064,7 +2064,7 @@ IRStmt* IRStmt_NoOp ( void ) return &static_closure; } IRStmt* IRStmt_IMark ( Addr addr, UInt len, UChar delta ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_IMark; s->Ist.IMark.addr = addr; s->Ist.IMark.len = len; @@ -2072,7 +2072,7 @@ IRStmt* IRStmt_IMark ( Addr addr, UInt len, UChar delta ) { return s; } IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_AbiHint; s->Ist.AbiHint.base = base; s->Ist.AbiHint.len = len; @@ -2080,27 +2080,27 @@ IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia ) { return s; } IRStmt* IRStmt_Put ( Int off, IRExpr* data ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_Put; s->Ist.Put.offset = off; s->Ist.Put.data = data; return s; } IRStmt* IRStmt_PutI ( IRPutI* details ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_PutI; s->Ist.PutI.details = details; return s; } IRStmt* IRStmt_WrTmp ( IRTemp tmp, IRExpr* data ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_WrTmp; s->Ist.WrTmp.tmp = tmp; s->Ist.WrTmp.data = data; return s; } IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_Store; s->Ist.Store.end = end; s->Ist.Store.addr = addr; @@ -2110,7 +2110,7 @@ IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data ) { } IRStmt* IRStmt_StoreG ( IREndness end, IRExpr* addr, IRExpr* data, IRExpr* guard ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_StoreG; s->Ist.StoreG.details = mkIRStoreG(end, addr, data, guard); vassert(end == Iend_LE || end == Iend_BE); @@ -2118,20 +2118,20 @@ IRStmt* IRStmt_StoreG ( IREndness end, IRExpr* addr, IRExpr* data, } IRStmt* IRStmt_LoadG ( IREndness end, IRLoadGOp cvt, IRTemp dst, IRExpr* addr, IRExpr* alt, IRExpr* guard ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_LoadG; s->Ist.LoadG.details = mkIRLoadG(end, cvt, dst, addr, alt, guard); return s; } IRStmt* IRStmt_CAS ( IRCAS* cas ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); 
s->tag = Ist_CAS; s->Ist.CAS.details = cas; return s; } IRStmt* IRStmt_LLSC ( IREndness end, IRTemp result, IRExpr* addr, IRExpr* storedata ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_LLSC; s->Ist.LLSC.end = end; s->Ist.LLSC.result = result; @@ -2141,21 +2141,21 @@ IRStmt* IRStmt_LLSC ( IREndness end, } IRStmt* IRStmt_Dirty ( IRDirty* d ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_Dirty; s->Ist.Dirty.details = d; return s; } IRStmt* IRStmt_MBE ( IRMBusEvent event ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_MBE; s->Ist.MBE.event = event; return s; } IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst, Int offsIP ) { - IRStmt* s = LibVEX_Alloc(sizeof(IRStmt)); + IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt)); s->tag = Ist_Exit; s->Ist.Exit.guard = guard; s->Ist.Exit.jk = jk; @@ -2169,8 +2169,8 @@ IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst, IRTypeEnv* emptyIRTypeEnv ( void ) { - IRTypeEnv* env = LibVEX_Alloc(sizeof(IRTypeEnv)); - env->types = LibVEX_Alloc(8 * sizeof(IRType)); + IRTypeEnv* env = LibVEX_Alloc_inline(sizeof(IRTypeEnv)); + env->types = LibVEX_Alloc_inline(8 * sizeof(IRType)); env->types_size = 8; env->types_used = 0; return env; @@ -2181,11 +2181,11 @@ IRTypeEnv* emptyIRTypeEnv ( void ) IRSB* emptyIRSB ( void ) { - IRSB* bb = LibVEX_Alloc(sizeof(IRSB)); + IRSB* bb = LibVEX_Alloc_inline(sizeof(IRSB)); bb->tyenv = emptyIRTypeEnv(); bb->stmts_used = 0; bb->stmts_size = 8; - bb->stmts = LibVEX_Alloc(bb->stmts_size * sizeof(IRStmt*)); + bb->stmts = LibVEX_Alloc_inline(bb->stmts_size * sizeof(IRStmt*)); bb->next = NULL; bb->jumpkind = Ijk_Boring; bb->offsIP = 0; @@ -2209,7 +2209,7 @@ IRExpr** shallowCopyIRExprVec ( IRExpr** vec ) IRExpr** newvec; for (i = 0; vec[i]; i++) ; - newvec = LibVEX_Alloc((i+1)*sizeof(IRExpr*)); + newvec = LibVEX_Alloc_inline((i+1)*sizeof(IRExpr*)); for (i = 0; vec[i]; i++) newvec[i] = vec[i]; newvec[i] = NULL; @@ -2224,7 +2224,7 @@ IRExpr** deepCopyIRExprVec ( IRExpr *const * vec ) IRExpr** newvec; for (i = 0; vec[i]; i++) ; - newvec = LibVEX_Alloc((i+1)*sizeof(IRExpr*)); + newvec = LibVEX_Alloc_inline((i+1)*sizeof(IRExpr*)); for (i = 0; vec[i]; i++) newvec[i] = deepCopyIRExpr(vec[i]); newvec[i] = NULL; @@ -2427,10 +2427,10 @@ IRStmt* deepCopyIRStmt ( const IRStmt* s ) IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* src ) { Int i; - IRTypeEnv* dst = LibVEX_Alloc(sizeof(IRTypeEnv)); + IRTypeEnv* dst = LibVEX_Alloc_inline(sizeof(IRTypeEnv)); dst->types_size = src->types_size; dst->types_used = src->types_used; - dst->types = LibVEX_Alloc(dst->types_size * sizeof(IRType)); + dst->types = LibVEX_Alloc_inline(dst->types_size * sizeof(IRType)); for (i = 0; i < src->types_used; i++) dst->types[i] = src->types[i]; return dst; @@ -2442,7 +2442,7 @@ IRSB* deepCopyIRSB ( const IRSB* bb ) IRStmt** sts2; IRSB* bb2 = deepCopyIRSBExceptStmts(bb); bb2->stmts_used = bb2->stmts_size = bb->stmts_used; - sts2 = LibVEX_Alloc(bb2->stmts_used * sizeof(IRStmt*)); + sts2 = LibVEX_Alloc_inline(bb2->stmts_used * sizeof(IRStmt*)); for (i = 0; i < bb2->stmts_used; i++) sts2[i] = deepCopyIRStmt(bb->stmts[i]); bb2->stmts = sts2; @@ -3433,7 +3433,7 @@ void addStmtToIRSB ( IRSB* bb, IRStmt* st ) { Int i; if (bb->stmts_used == bb->stmts_size) { - IRStmt** stmts2 = LibVEX_Alloc(2 * bb->stmts_size * sizeof(IRStmt*)); + IRStmt** stmts2 = LibVEX_Alloc_inline(2 * 
bb->stmts_size * sizeof(IRStmt*)); for (i = 0; i < bb->stmts_size; i++) stmts2[i] = bb->stmts[i]; bb->stmts = stmts2; @@ -3464,7 +3464,7 @@ IRTemp newIRTemp ( IRTypeEnv* env, IRType ty ) Int i; Int new_size = env->types_size==0 ? 8 : 2*env->types_size; IRType* new_types - = LibVEX_Alloc(new_size * sizeof(IRType)); + = LibVEX_Alloc_inline(new_size * sizeof(IRType)); for (i = 0; i < env->types_used; i++) new_types[i] = env->types[i]; env->types = new_types; @@ -4445,7 +4445,7 @@ void sanityCheckIRSB ( const IRSB* bb, const HChar* caller, { Int i; Int n_temps = bb->tyenv->types_used; - Int* def_counts = LibVEX_Alloc(n_temps * sizeof(Int)); + Int* def_counts = LibVEX_Alloc_inline(n_temps * sizeof(Int)); if (0) vex_printf("sanityCheck: %s\n", caller); diff --git a/VEX/priv/ir_opt.c b/VEX/priv/ir_opt.c index 01871bf104..e592f388c1 100644 --- a/VEX/priv/ir_opt.c +++ b/VEX/priv/ir_opt.c @@ -188,12 +188,12 @@ typedef static HashHW* newHHW ( void ) { - HashHW* h = LibVEX_Alloc(sizeof(HashHW)); + HashHW* h = LibVEX_Alloc_inline(sizeof(HashHW)); h->size = 8; h->used = 0; - h->inuse = LibVEX_Alloc(h->size * sizeof(Bool)); - h->key = LibVEX_Alloc(h->size * sizeof(HWord)); - h->val = LibVEX_Alloc(h->size * sizeof(HWord)); + h->inuse = LibVEX_Alloc_inline(h->size * sizeof(Bool)); + h->key = LibVEX_Alloc_inline(h->size * sizeof(HWord)); + h->val = LibVEX_Alloc_inline(h->size * sizeof(HWord)); return h; } @@ -233,9 +233,9 @@ static void addToHHW ( HashHW* h, HWord key, HWord val ) /* Ensure a space is available. */ if (h->used == h->size) { /* Copy into arrays twice the size. */ - Bool* inuse2 = LibVEX_Alloc(2 * h->size * sizeof(Bool)); - HWord* key2 = LibVEX_Alloc(2 * h->size * sizeof(HWord)); - HWord* val2 = LibVEX_Alloc(2 * h->size * sizeof(HWord)); + Bool* inuse2 = LibVEX_Alloc_inline(2 * h->size * sizeof(Bool)); + HWord* key2 = LibVEX_Alloc_inline(2 * h->size * sizeof(HWord)); + HWord* val2 = LibVEX_Alloc_inline(2 * h->size * sizeof(HWord)); for (i = j = 0; i < h->size; i++) { if (!h->inuse[i]) continue; inuse2[j] = True; @@ -2758,7 +2758,7 @@ IRSB* cprop_BB ( IRSB* in ) IRSB* out; IRStmt* st2; Int n_tmps = in->tyenv->types_used; - IRExpr** env = LibVEX_Alloc(n_tmps * sizeof(IRExpr*)); + IRExpr** env = LibVEX_Alloc_inline(n_tmps * sizeof(IRExpr*)); /* Keep track of IRStmt_LoadGs that we need to revisit after processing all the other statements. */ const Int N_FIXUPS = 16; @@ -3094,7 +3094,7 @@ static Bool isOneU1 ( IRExpr* e ) { Int i, i_unconditional_exit; Int n_tmps = bb->tyenv->types_used; - Bool* set = LibVEX_Alloc(n_tmps * sizeof(Bool)); + Bool* set = LibVEX_Alloc_inline(n_tmps * sizeof(Bool)); IRStmt* st; for (i = 0; i < n_tmps; i++) @@ -3405,7 +3405,7 @@ static void irExprVec_to_TmpOrConsts ( /*OUT*/TmpOrConst** outs, /* We have to make two passes, one to count, one to copy. */ for (n = 0; ins[n]; n++) ; - *outs = LibVEX_Alloc(n * sizeof(TmpOrConst)); + *outs = LibVEX_Alloc_inline(n * sizeof(TmpOrConst)); *nOuts = n; /* and now copy .. 
*/ for (i = 0; i < n; i++) { @@ -3580,13 +3580,13 @@ static IRExpr* availExpr_to_IRExpr ( AvailExpr* ae ) IRExpr_RdTmp(ae->u.Btt.arg1), IRExpr_RdTmp(ae->u.Btt.arg2) ); case Btc: - con = LibVEX_Alloc(sizeof(IRConst)); + con = LibVEX_Alloc_inline(sizeof(IRConst)); *con = ae->u.Btc.con2; return IRExpr_Binop( ae->u.Btc.op, IRExpr_RdTmp(ae->u.Btc.arg1), IRExpr_Const(con) ); case Bct: - con = LibVEX_Alloc(sizeof(IRConst)); + con = LibVEX_Alloc_inline(sizeof(IRConst)); *con = ae->u.Bct.con1; return IRExpr_Binop( ae->u.Bct.op, IRExpr_Const(con), @@ -3598,21 +3598,21 @@ static IRExpr* availExpr_to_IRExpr ( AvailExpr* ae ) IRExpr_RdTmp(ae->u.Ittt.e1), IRExpr_RdTmp(ae->u.Ittt.e0)); case Ittc: - con0 = LibVEX_Alloc(sizeof(IRConst)); + con0 = LibVEX_Alloc_inline(sizeof(IRConst)); *con0 = ae->u.Ittc.con0; return IRExpr_ITE(IRExpr_RdTmp(ae->u.Ittc.co), IRExpr_RdTmp(ae->u.Ittc.e1), IRExpr_Const(con0)); case Itct: - con1 = LibVEX_Alloc(sizeof(IRConst)); + con1 = LibVEX_Alloc_inline(sizeof(IRConst)); *con1 = ae->u.Itct.con1; return IRExpr_ITE(IRExpr_RdTmp(ae->u.Itct.co), IRExpr_Const(con1), IRExpr_RdTmp(ae->u.Itct.e0)); case Itcc: - con0 = LibVEX_Alloc(sizeof(IRConst)); - con1 = LibVEX_Alloc(sizeof(IRConst)); + con0 = LibVEX_Alloc_inline(sizeof(IRConst)); + con1 = LibVEX_Alloc_inline(sizeof(IRConst)); *con0 = ae->u.Itcc.con0; *con1 = ae->u.Itcc.con1; return IRExpr_ITE(IRExpr_RdTmp(ae->u.Itcc.co), @@ -3625,7 +3625,7 @@ static IRExpr* availExpr_to_IRExpr ( AvailExpr* ae ) case CCall: { Int i, n = ae->u.CCall.nArgs; vassert(n >= 0); - IRExpr** vec = LibVEX_Alloc((n+1) * sizeof(IRExpr*)); + IRExpr** vec = LibVEX_Alloc_inline((n+1) * sizeof(IRExpr*)); vec[n] = NULL; for (i = 0; i < n; i++) { vec[i] = tmpOrConst_to_IRExpr(&ae->u.CCall.args[i]); @@ -3723,7 +3723,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) switch (e->tag) { case Iex_Unop: if (e->Iex.Unop.arg->tag == Iex_RdTmp) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Ut; ae->u.Ut.op = e->Iex.Unop.op; ae->u.Ut.arg = e->Iex.Unop.arg->Iex.RdTmp.tmp; @@ -3734,7 +3734,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) case Iex_Binop: if (e->Iex.Binop.arg1->tag == Iex_RdTmp) { if (e->Iex.Binop.arg2->tag == Iex_RdTmp) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Btt; ae->u.Btt.op = e->Iex.Binop.op; ae->u.Btt.arg1 = e->Iex.Binop.arg1->Iex.RdTmp.tmp; @@ -3742,7 +3742,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) return ae; } if (e->Iex.Binop.arg2->tag == Iex_Const) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Btc; ae->u.Btc.op = e->Iex.Binop.op; ae->u.Btc.arg1 = e->Iex.Binop.arg1->Iex.RdTmp.tmp; @@ -3751,7 +3751,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) } } else if (e->Iex.Binop.arg1->tag == Iex_Const && e->Iex.Binop.arg2->tag == Iex_RdTmp) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Bct; ae->u.Bct.op = e->Iex.Binop.op; ae->u.Bct.arg2 = e->Iex.Binop.arg2->Iex.RdTmp.tmp; @@ -3762,7 +3762,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) case Iex_Const: if (e->Iex.Const.con->tag == Ico_F64i) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Cf64i; ae->u.Cf64i.f64i = e->Iex.Const.con->Ico.F64i; return ae; @@ -3773,7 +3773,7 @@ static 
AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) if (e->Iex.ITE.cond->tag == Iex_RdTmp) { if (e->Iex.ITE.iffalse->tag == Iex_RdTmp) { if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Ittt; ae->u.Ittt.co = e->Iex.ITE.cond->Iex.RdTmp.tmp; ae->u.Ittt.e1 = e->Iex.ITE.iftrue->Iex.RdTmp.tmp; @@ -3781,7 +3781,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) return ae; } if (e->Iex.ITE.iftrue->tag == Iex_Const) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Itct; ae->u.Itct.co = e->Iex.ITE.cond->Iex.RdTmp.tmp; ae->u.Itct.con1 = *(e->Iex.ITE.iftrue->Iex.Const.con); @@ -3790,7 +3790,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) } } else if (e->Iex.ITE.iffalse->tag == Iex_Const) { if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Ittc; ae->u.Ittc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp; ae->u.Ittc.e1 = e->Iex.ITE.iftrue->Iex.RdTmp.tmp; @@ -3798,7 +3798,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) return ae; } if (e->Iex.ITE.iftrue->tag == Iex_Const) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Itcc; ae->u.Itcc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp; ae->u.Itcc.con1 = *(e->Iex.ITE.iftrue->Iex.Const.con); @@ -3811,7 +3811,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) case Iex_GetI: if (e->Iex.GetI.ix->tag == Iex_RdTmp) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = GetIt; ae->u.GetIt.descr = e->Iex.GetI.descr; ae->u.GetIt.ix = e->Iex.GetI.ix->Iex.RdTmp.tmp; @@ -3821,7 +3821,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) break; case Iex_CCall: - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = CCall; /* Ok to share only the cee, since it is immutable. */ ae->u.CCall.cee = e->Iex.CCall.cee; @@ -3842,7 +3842,7 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd ) "available", which effectively disables CSEing of them, as desired. */ if (allowLoadsToBeCSEd) { - ae = LibVEX_Alloc(sizeof(AvailExpr)); + ae = LibVEX_Alloc_inline(sizeof(AvailExpr)); ae->tag = Load; ae->u.Load.end = e->Iex.Load.end; ae->u.Load.ty = e->Iex.Load.ty; @@ -5733,7 +5733,7 @@ static Interval stmt_modifies_guest_state ( Addr max_ga = 0; Int n_tmps = bb->tyenv->types_used; - UShort* uses = LibVEX_Alloc(n_tmps * sizeof(UShort)); + UShort* uses = LibVEX_Alloc_inline(n_tmps * sizeof(UShort)); /* Phase 1. Scan forwards in bb, counting use occurrences of each temp. Also count occurrences in the bb->next field. Take the @@ -6240,7 +6240,7 @@ static Bool do_XOR_TRANSFORM_IRSB ( IRSB* sb ) /* Make the tmp->expr environment, so we can use it for chasing expressions. */ Int n_tmps = sb->tyenv->types_used; - IRExpr** env = LibVEX_Alloc(n_tmps * sizeof(IRExpr*)); + IRExpr** env = LibVEX_Alloc_inline(n_tmps * sizeof(IRExpr*)); for (i = 0; i < n_tmps; i++) env[i] = NULL; diff --git a/VEX/priv/main_util.c b/VEX/priv/main_util.c index 5f2fa5c4fc..d0732e927b 100644 --- a/VEX/priv/main_util.c +++ b/VEX/priv/main_util.c @@ -52,10 +52,6 @@ into memory, the rate falls by about a factor of 3. 
 */

-/* Allocated memory as returned by LibVEX_Alloc will be aligned on this
-   boundary. */
-#define REQ_ALIGN 8
-
 #define N_TEMPORARY_BYTES 5000000

 static HChar temporary[N_TEMPORARY_BYTES] __attribute__((aligned(REQ_ALIGN)));
@@ -72,9 +68,9 @@ static HChar* permanent_first = &permanent[0];
 static HChar* permanent_curr  = &permanent[0];
 static HChar* permanent_last  = &permanent[N_PERMANENT_BYTES-1];

-static HChar* private_LibVEX_alloc_first = &temporary[0];
-static HChar* private_LibVEX_alloc_curr  = &temporary[0];
-static HChar* private_LibVEX_alloc_last  = &temporary[N_TEMPORARY_BYTES-1];
+HChar* private_LibVEX_alloc_first = &temporary[0];
+HChar* private_LibVEX_alloc_curr  = &temporary[0];
+HChar* private_LibVEX_alloc_last  = &temporary[N_TEMPORARY_BYTES-1];

 static VexAllocMode mode = VexAllocModeTEMP;
@@ -160,7 +156,7 @@ VexAllocMode vexGetAllocMode ( void )
 }

 __attribute__((noreturn))
-static void private_LibVEX_alloc_OOM(void)
+void private_LibVEX_alloc_OOM(void)
 {
    const HChar* pool = "???";
    if (private_LibVEX_alloc_first == &temporary[0]) pool = "TEMP";
@@ -201,54 +197,6 @@ void vexSetAllocModeTEMP_and_clear ( void )

 /* Exported to library client. */

-/* Allocate in Vex's temporary allocation area. Be careful with this.
-   You can only call it inside an instrumentation or optimisation
-   callback that you have previously specified in a call to
-   LibVEX_Translate. The storage allocated will only stay alive until
-   translation of the current basic block is complete.
- */
-
-void* LibVEX_Alloc ( SizeT nbytes )
-{
-   struct align {
-      char c;
-      union {
-         char c;
-         short s;
-         int i;
-         long l;
-         long long ll;
-         float f;
-         double d;
-         /* long double is currently not used and would increase alignment
-            unnecessarily. */
-         /* long double ld; */
-         void *pto;
-         void (*ptf)(void);
-      } x;
-   };
-
-   /* Make sure the compiler does no surprise us */
-   vassert(offsetof(struct align,x) <= REQ_ALIGN);
-
-#if 0
-   /* Nasty debugging hack, do not use. */
-   return malloc(nbytes);
-#else
-   HChar* curr;
-   HChar* next;
-   SizeT ALIGN;
-   ALIGN = offsetof(struct align,x) - 1;
-   nbytes = (nbytes + ALIGN) & ~ALIGN;
-   curr = private_LibVEX_alloc_curr;
-   next = curr + nbytes;
-   if (next >= private_LibVEX_alloc_last)
-      private_LibVEX_alloc_OOM();
-   private_LibVEX_alloc_curr = next;
-   return curr;
-#endif
-}
-
 void LibVEX_ShowAllocStats ( void )
 {
    vex_printf("vex storage: T total %lld bytes allocated\n",
@@ -257,6 +205,10 @@ void LibVEX_ShowAllocStats ( void )
               (Long)(permanent_curr - permanent_first) );
 }

+void *LibVEX_Alloc ( SizeT nbytes )
+{
+   return LibVEX_Alloc_inline(nbytes);
+}

 /*---------------------------------------------------------*/
 /*--- Bombing out ---*/

diff --git a/VEX/priv/main_util.h b/VEX/priv/main_util.h
index 54c20ca29b..5ef7a49e71 100644
--- a/VEX/priv/main_util.h
+++ b/VEX/priv/main_util.h
@@ -105,6 +105,62 @@ extern void vexAllocSanityCheck ( void );

 extern void vexSetAllocModeTEMP_and_clear ( void );

+/* Allocate in Vex's temporary allocation area. Be careful with this.
+   You can only call it inside an instrumentation or optimisation
+   callback that you have previously specified in a call to
+   LibVEX_Translate. The storage allocated will only stay alive until
+   translation of the current basic block is complete.
+ */
+extern HChar* private_LibVEX_alloc_first;
+extern HChar* private_LibVEX_alloc_curr;
+extern HChar* private_LibVEX_alloc_last;
+extern void private_LibVEX_alloc_OOM(void) __attribute__((noreturn));
+
+/* Allocated memory as returned by LibVEX_Alloc will be aligned on this
+   boundary. */
+#define REQ_ALIGN 8
+
+static inline void* LibVEX_Alloc_inline ( SizeT nbytes )
+{
+   struct align {
+      char c;
+      union {
+         char c;
+         short s;
+         int i;
+         long l;
+         long long ll;
+         float f;
+         double d;
+         /* long double is currently not used and would increase alignment
+            unnecessarily. */
+         /* long double ld; */
+         void *pto;
+         void (*ptf)(void);
+      } x;
+   };
+
+   /* Make sure the compiler does no surprise us */
+   vassert(offsetof(struct align,x) <= REQ_ALIGN);
+
+#if 0
+   /* Nasty debugging hack, do not use. */
+   return malloc(nbytes);
+#else
+   HChar* curr;
+   HChar* next;
+   SizeT ALIGN;
+   ALIGN = offsetof(struct align,x) - 1;
+   nbytes = (nbytes + ALIGN) & ~ALIGN;
+   curr = private_LibVEX_alloc_curr;
+   next = curr + nbytes;
+   if (next >= private_LibVEX_alloc_last)
+      private_LibVEX_alloc_OOM();
+   private_LibVEX_alloc_curr = next;
+   return curr;
+#endif
+}
+
 #endif /* ndef __VEX_MAIN_UTIL_H */

/*---------------------------------------------------------------*/
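
For reference, the scheme behind LibVEX_Alloc_inline is a plain bump allocator: round the request up to an 8-byte boundary, advance a cursor through a fixed pool, and fail hard when the pool is exhausted. Below is a minimal, self-contained sketch of the same pattern. All names in it (arena_alloc, pool, cursor, POOL_BYTES, POOL_ALIGN) are hypothetical and exist only for this illustration; unlike VEX, it hardcodes the alignment to 8 rather than deriving it from offsetof(struct align, x), and it uses stdio/exit where VEX would call private_LibVEX_alloc_OOM.

/* Hypothetical sketch of the bump-allocation pattern; not part of VEX. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_BYTES 4096   /* small demo pool; VEX's TEMP pool is 5000000 bytes */
#define POOL_ALIGN 8      /* plays the role of REQ_ALIGN */

static char  pool[POOL_BYTES] __attribute__((aligned(POOL_ALIGN)));
static char* cursor = pool;

static void* arena_alloc(size_t nbytes)
{
   /* Round up: with mask = POOL_ALIGN - 1 = 7, (nbytes + 7) & ~7 is the
      smallest multiple of 8 that is >= nbytes, so the cursor (and hence
      every returned pointer) stays 8-byte aligned. */
   size_t mask = POOL_ALIGN - 1;
   nbytes = (nbytes + mask) & ~mask;
   if (cursor + nbytes >= pool + POOL_BYTES) {
      /* stands in for private_LibVEX_alloc_OOM() */
      fprintf(stderr, "arena exhausted\n");
      exit(1);
   }
   char* p = cursor;
   cursor += nbytes;
   return p;
}

int main(void)
{
   char* a = arena_alloc(1);    /* request rounded up: 1  -> 8  */
   char* b = arena_alloc(13);   /* request rounded up: 13 -> 16 */
   char* c = arena_alloc(16);   /* already a multiple of 8      */
   printf("b-a = %td, c-b = %td\n", b - a, c - b);   /* prints: b-a = 8, c-b = 16 */
   return 0;
}

The printed distances (8 and 16) are exactly the rounded request sizes, which is why no returned pointer can be misaligned. Note also that the patch keeps the out-of-line LibVEX_Alloc as a thin wrapper that forwards to LibVEX_Alloc_inline, so the exported entry point continues to work for external callers while internal call sites get the inlinable fast path.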