lj_libdef.h
lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h \
lj_obj.h lj_def.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_bc.h lj_ir.h \
- lj_jit.h lj_iropt.h lj_dispatch.h lj_vm.h lj_vmevent.h lj_lib.h luajit.h \
- lj_libdef.h
+ lj_jit.h lj_iropt.h lj_target.h lj_target_*.h lj_dispatch.h lj_vm.h \
+ lj_vmevent.h lj_lib.h luajit.h lj_libdef.h
lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
lj_def.h lj_arch.h lj_lib.h lj_libdef.h
lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
+#include "lj_target.h"
#endif
#include "lj_dispatch.h"
#include "lj_vm.h"
/* local addr = jit.util.traceexitstub(idx) */
LJLIB_CF(jit_util_traceexitstub)
{
-#ifdef EXITSTUBS_PER_GROUP
+  /* NOTE(review): this hunk drops the EXITSTUBS_PER_GROUP guard, so the
+  ** function now assumes every build target defines EXITSTUBS_PER_GROUP
+  ** (moved into the per-target lj_target_*.h headers elsewhere in this
+  ** patch) — confirm all targets define it before applying. */
ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
jit_State *J = L2J(L);
if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
return 1;
}
-#else
- UNUSED(L);
-#endif
return 0;
}
#define LJ_TARGET_X64 1
#define LJ_TARGET_X86ORX64 1
#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */
#define LJ_TARGET_MASKSHIFT 1
#define LJ_TARGET_MASKROT 1
#define LJ_ARCH_DUALNUM 1
#define LJ_ABI_EABI 1
#define LJ_TARGET_ARM 1
#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
#define LJ_TARGET_MASKSHIFT 0
#define LJ_TARGET_MASKROT 1
#define LJ_ARCH_DUALNUM 2
#define LJ_TARGET_PPC 1
#define LJ_TARGET_PPCSPE 1
#define LJ_TARGET_EHRETREG 3
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
#define LJ_TARGET_MASKSHIFT 0
#define LJ_TARGET_MASKROT 1
#define LJ_ARCH_DUALNUM 0
/* Set trace entry point before fixing up tail to allow link to self. */
T->mcode = as->mcp;
- T->mcloop = as->mcloop ? (MSize)(as->mcloop - as->mcp) : 0;
+ T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
if (!as->loopref)
asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
- T->szmcode = (MSize)(as->mctop - as->mcp);
+ T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
VG_INVALIDATE(T->mcode, T->szmcode);
}
#define lj_resetsplit(J) UNUSED(J)
#endif
-/* Exit stubs. */
-#if LJ_TARGET_X86ORX64
-/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
-#define EXITSTUB_SPACING (2+2)
-#define EXITSTUBS_PER_GROUP 32
-#endif
-
/* Fold state is used to fold instructions on-the-fly. */
typedef struct FoldState {
IRIns ins; /* Currently emitted instruction. */
int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
-#if LJ_TARGET_X86ORX64
MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
-#endif
HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
uint32_t penaltyslot; /* Round-robin index into penalty slots. */
return J->prngstate >> (32-bits);
}
-#ifdef EXITSTUBS_PER_GROUP
-/* Return the address of an exit stub. */
-static LJ_AINLINE MCode *exitstub_addr(jit_State *J, ExitNo exitno)
-{
- lua_assert(J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] != NULL);
- return (MCode *)((char *)J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] +
- EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP));
-}
-#endif
-
#endif
#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
#endif
-#if LJ_TARGET_X64
-#define MCODE_JUMPRANGE 31
-#elif LJ_TARGET_ARM
-#define MCODE_JUMPRANGE 26
-#else
-#define MCODE_JUMPRANGE 32
-#endif
-
-#if MCODE_JUMPRANGE == 32
-
-/* All 32 bit memory addresses are reachable by relative jumps. */
-#define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN)
-
-#else
+#ifdef LJ_TARGET_JUMPRANGE
/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
*/
uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
- const uintptr_t range = (1u << MCODE_JUMPRANGE) - (1u << 21);
+ const uintptr_t range = (1u << LJ_TARGET_JUMPRANGE) - (1u << 21);
/* First try a contiguous area below the last one. */
uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
int i;
return NULL;
}
+#else
+
+/* All memory addresses are reachable by relative jumps. */
+#define mcode_alloc(J, sz) mcode_alloc_at((J), 0, (sz), MCPROT_GEN)
+
#endif
/* -- MCode area management ----------------------------------------------- */
o->u32.lo = (uint32_t)*sps;
} else if (irt_isinteger(t)) {
setintV(o, *sps);
- } else if (!LJ_SOFTFP && irt_isnum(t)) {
+#if !LJ_SOFTFP
+ } else if (irt_isnum(t)) {
o->u64 = *(uint64_t *)sps;
+#endif
#if LJ_64
} else if (irt_islightud(t)) {
/* 64 bit lightuserdata which may escape already has the tag bits. */
o->u32.lo = (uint32_t)ex->gpr[r-RID_MIN_GPR];
} else if (irt_isinteger(t)) {
setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
- } else if (!LJ_SOFTFP && irt_isnum(t)) {
+#if !LJ_SOFTFP
+ } else if (irt_isnum(t)) {
setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+#endif
#if LJ_64
} else if (irt_islightud(t)) {
/* 64 bit lightuserdata which may escape already has the tag bits. */
#error "Missing include for target CPU"
#endif
+/* Return the address of an exit stub. */
+/* Stubs live in groups of EXITSTUBS_PER_GROUP; the group base address is */
+/* taken from J->exitstubgroup and the stub is located at a fixed */
+/* EXITSTUB_SPACING byte offset per slot within its group. The assert */
+/* requires the group to have been allocated before the first lookup. */
+static LJ_AINLINE MCode *exitstub_addr(jit_State *J, ExitNo exitno)
+{
+ lua_assert(J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] != NULL);
+ return (MCode *)((char *)J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP] +
+ EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP));
+}
+
#endif
int32_t spill[256]; /* Spill slots. */
} ExitState;
+/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
+#define EXITSTUB_SPACING (2+2)
+#define EXITSTUBS_PER_GROUP 32
+
/* -- x86 ModRM operand encoding ------------------------------------------ */
typedef enum {
memset(J->penalty, 0, sizeof(J->penalty));
/* Free the whole machine code and invalidate all exit stub groups. */
lj_mcode_free(J);
-#ifdef EXITSTUBS_PER_GROUP
memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
-#endif
lj_vmevent_send(L, TRACE,
setstrV(L, L->top++, lj_str_newlit(L, "flush"));
);
return NULL;
}
+#ifndef LUAJIT_DISABLE_VMEVENT
+/* Push all registers from exit state. */
+/* Pushes RID_NUM_GPR and RID_NUM_FPR first, then every GPR value (as an */
+/* integer when a GPR is 32 bit, otherwise as a number), then — unless */
+/* built soft-float — every FPR value with NaNs canonicalized via setnanV. */
+/* Caller must have reserved enough Lua stack space beforehand (the TEXIT */
+/* event handler calls lj_state_checkstack before invoking this). */
+static void trace_exit_regs(lua_State *L, ExitState *ex)
+{
+ int32_t i;
+ setintV(L->top++, RID_NUM_GPR);
+ setintV(L->top++, RID_NUM_FPR);
+ for (i = 0; i < RID_NUM_GPR; i++) {
+ if (sizeof(ex->gpr[i]) == sizeof(int32_t))
+ setintV(L->top++, (int32_t)ex->gpr[i]);
+ else
+ setnumV(L->top++, (lua_Number)ex->gpr[i]);
+ }
+#if !LJ_SOFTFP
+ for (i = 0; i < RID_NUM_FPR; i++) {
+ setnumV(L->top, ex->fpr[i]);
+ if (LJ_UNLIKELY(tvisnan(L->top)))
+ setnanV(L->top);
+ L->top++;
+ }
+#endif
+}
+#endif
+
+#ifdef EXITSTATE_PCREG
+/* Determine trace number from pc of exit instruction. */
+/* Linear scan over all allocated traces, matching pc against each trace's */
+/* [mcode, mcode+szmcode) range. O(number of traces) per exit — acceptable */
+/* only because it runs on the (slow) trace-exit path. Asserts (and returns */
+/* 0 in release builds) if pc lies in no trace, which indicates a corrupt */
+/* exit state. */
+static TraceNo trace_exit_find(jit_State *J, MCode *pc)
+{
+ TraceNo traceno;
+ for (traceno = 1; traceno < J->sizetrace; traceno++) {
+ GCtrace *T = traceref(J, traceno);
+ if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
+ return traceno;
+ }
+ lua_assert(0);
+ return 0;
+}
+#endif
+
/* A trace exited. Restore interpreter state. */
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
ERRNO_SAVE
lua_State *L = J->L;
+ ExitState *ex = (ExitState *)exptr;
ExitDataCP exd;
int errcode;
const BCIns *pc;
void *cf;
+#ifdef EXITSTATE_PCREG
+ J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
+#endif
exd.J = J;
exd.exptr = exptr;
errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
return -errcode; /* Return negated error code. */
lj_vmevent_send(L, TEXIT,
- ExitState *ex = (ExitState *)exptr;
- int32_t i;
lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
setintV(L->top++, J->parent);
setintV(L->top++, J->exitno);
- setintV(L->top++, RID_NUM_GPR);
- setintV(L->top++, RID_NUM_FPR);
- for (i = 0; i < RID_NUM_GPR; i++) {
- if (sizeof(ex->gpr[i]) == sizeof(int32_t))
- setintV(L->top++, (int32_t)ex->gpr[i]);
- else
- setnumV(L->top++, (lua_Number)ex->gpr[i]);
- }
- for (i = 0; i < RID_NUM_FPR; i++) {
- setnumV(L->top, ex->fpr[i]);
- if (LJ_UNLIKELY(tvisnan(L->top)))
- setnanV(L->top);
- L->top++;
- }
+ trace_exit_regs(L, ex);
);
pc = exd.pc;