--- /dev/null
+|// Low-level VM code for ARM64 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch arm64
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// ARM64 registers and the AAPCS64 ABI 1.0 at a glance:
+|//
+|// x0-x17 temp, x19-x28 callee-saved, x29 fp, x30 lr
+|// x18 is reserved on most platforms. Don't use it, save it or restore it.
+|// x31 doesn't exist. Register number 31 either means xzr/wzr (zero) or sp,
+|// depending on the instruction.
+|// v0-v7 temp, v8-v15 callee-saved (only d8-d15 preserved), v16-v31 temp
+|//
+|// x0-x7/v0-v7 hold parameters and results.
+|
+|// Fixed register assignments for the interpreter.
+|
+|// The following must be C callee-save.
+|.define BASE, x19 // Base of current Lua stack frame.
+|.define KBASE, x20 // Constants of current Lua function.
+|.define PC, x21 // Next PC.
+|.define GLREG, x22 // Global state.
+|.define LREG, x23 // Register holding lua_State (also in SAVE_L).
+|.define TISNUM, x24 // Constant LJ_TISNUM << 47.
+|.define TISNUMhi, x25 // Constant LJ_TISNUM << 15.
+|.define TISNIL, x26 // Constant -1LL.
+|.define fp, x29 // Yes, we have to maintain a frame pointer.
+|
+|.define ST_INTERP, w26 // Constant -1.
+|
+|// The following temporaries are not saved across C calls, except for RA/RC.
+|.define RA, x27
+|.define RC, x28
+|.define RB, x17
+|.define RAw, w27
+|.define RCw, w28
+|.define RBw, w17
+|.define INS, x16
+|.define INSw, w16
+|.define ITYPE, x15
+|.define TMP0, x8
+|.define TMP1, x9
+|.define TMP2, x10
+|.define TMP3, x11
+|.define TMP0w, w8
+|.define TMP1w, w9
+|.define TMP2w, w10
+|.define TMP3w, w11
+|
+|// Calling conventions. Also used as temporaries.
+|.define CARG1, x0
+|.define CARG2, x1
+|.define CARG3, x2
+|.define CARG4, x3
+|.define CARG5, x4
+|.define CARG1w, w0
+|.define CARG2w, w1
+|.define CARG3w, w2
+|.define CARG4w, w3
+|.define CARG5w, w4
+|
+|.define FARG1, d0
+|.define FARG2, d1
+|
+|.define CRET1, x0
+|.define CRET1w, w0
+|
+|//-----------------------------------------------------------------------
+|
+|// ARM64e pointer authentication codes (PAC).
+|.if PAUTH
+|.macro sp_auth; pacibsp; .endmacro
+|.macro br_auth, reg; braaz reg; .endmacro
+|.macro blr_auth, reg; blraaz reg; .endmacro
+|.macro ret_auth; retab; .endmacro
+|.else
+|.macro sp_auth; .endmacro
+|.macro br_auth, reg; br reg; .endmacro
+|.macro blr_auth, reg; blr reg; .endmacro
+|.macro ret_auth; ret; .endmacro
+|.endif
+|
+|//-----------------------------------------------------------------------
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|
+|.define CFRAME_SPACE, 208
+|//----- 16 byte aligned, <-- sp entering interpreter
+|.define SAVE_FP_LR_, 192
+|.define SAVE_GPR_, 112 // 112+10*8: 64 bit GPR saves
+|.define SAVE_FPR_, 48 // 48+8*8: 64 bit FPR saves
+|// Unused [sp, #44] // 32 bit values
+|.define SAVE_NRES, [sp, #40]
+|.define SAVE_ERRF, [sp, #36]
+|.define SAVE_MULTRES, [sp, #32]
+|.define TMPD, [sp, #24] // 64 bit values
+|.define SAVE_L, [sp, #16]
+|.define SAVE_PC, [sp, #8]
+|.define SAVE_CFRAME, [sp, #0]
+|//----- 16 byte aligned, <-- sp while in interpreter.
+|
+|.define TMPDofs, #24
+|
+|.if WIN
+|// Windows unwind data is suited to r1 stored first.
+|.macro stp_unwind, r1, r2, where
+| stp r1, r2, where
+|.endmacro
+|.macro ldp_unwind, r1, r2, where
+| ldp r1, r2, where
+|.endmacro
+|.macro ldp_unwind, r1, r2, where, post_index
+| ldp r1, r2, where, post_index
+|.endmacro
+|.else
+|// Otherwise store r2 first for compact unwind info (OSX).
+|.macro stp_unwind, r1, r2, where
+| stp r2, r1, where
+|.endmacro
+|.macro ldp_unwind, r1, r2, where
+| ldp r2, r1, where
+|.endmacro
+|.macro ldp_unwind, r1, r2, where, post_index
+| ldp r2, r1, where, post_index
+|.endmacro
+|.endif
+|
+|.macro save_, gpr1, gpr2, fpr1, fpr2
+| stp_unwind d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(14-fpr1)*8]
+| stp_unwind x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(27-gpr1)*8]
+|.endmacro
+|.macro rest_, gpr1, gpr2, fpr1, fpr2
+| ldp_unwind d..fpr1, d..fpr2, [sp, # SAVE_FPR_+(14-fpr1)*8]
+| ldp_unwind x..gpr1, x..gpr2, [sp, # SAVE_GPR_+(27-gpr1)*8]
+|.endmacro
+|
+|.macro saveregs
+| sp_auth
+| sub sp, sp, # CFRAME_SPACE
+| stp fp, lr, [sp, # SAVE_FP_LR_]
+| add fp, sp, # SAVE_FP_LR_
+| stp_unwind x19, x20, [sp, # SAVE_GPR_+(27-19)*8]
+| save_ 21, 22, 8, 9
+| save_ 23, 24, 10, 11
+| save_ 25, 26, 12, 13
+| save_ 27, 28, 14, 15
+|.endmacro
+|.macro restoreregs
+| ldp_unwind x19, x20, [sp, # SAVE_GPR_+(27-19)*8]
+| rest_ 21, 22, 8, 9
+| rest_ 23, 24, 10, 11
+| rest_ 25, 26, 12, 13
+| rest_ 27, 28, 14, 15
+| ldp fp, lr, [sp, # SAVE_FP_LR_]
+| add sp, sp, # CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State, GLREG
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; brk; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_FUNC, #-16
+|.define FRAME_PC, #-8
+|
+|// Endian-specific defines.
+|.if ENDIAN_LE
+|.define LO, 0
+|.define OFS_RD, 2
+|.define OFS_RB, 3
+|.define OFS_RA, 1
+|.define OFS_OP, 0
+|.else
+|.define LO, 4
+|.define OFS_RD, 0
+|.define OFS_RB, 0
+|.define OFS_RA, 2
+|.define OFS_OP, 3
+|.endif
+|
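+|// Each 32 bit instruction packs its operands as OP | A<<8 | C<<16 | B<<24,
+|// with the 16 bit D operand overlaying B/C in bits 16-31. The ubfx/ubfiz
+|// extractions below follow directly from that layout.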
+|.macro decode_RA, dst, ins; ubfx dst, ins, #8, #8; .endmacro
+|.macro decode_RB, dst, ins; ubfx dst, ins, #24, #8; .endmacro
+|.macro decode_RC, dst, ins; ubfx dst, ins, #16, #8; .endmacro
+|.macro decode_RD, dst, ins; ubfx dst, ins, #16, #16; .endmacro
+|.macro decode_RC8RD, dst, src; ubfiz dst, src, #3, #8; .endmacro
+|
+|// Instruction decode+dispatch.
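+|// The opcode indexes the dispatch table of ASMFunction pointers that
+|// follows the global_State (offset GG_G2DISP), i.e. the handler address is
+|// loaded from GL + op*8 + GG_G2DISP.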
+|.macro ins_NEXT
+| ldr INSw, [PC], #4
+| add TMP1, GL, INS, uxtb #3
+| decode_RA RA, INS
+| ldr TMP0, [TMP1, #GG_G2DISP]
+| decode_RD RC, INS
+| br_auth TMP0
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ldr PC, LFUNC:CARG3->pc
+| ldr INSw, [PC], #4
+| add TMP1, GL, INS, uxtb #3
+| decode_RA RA, INS
+| ldr TMP0, [TMP1, #GG_G2DISP]
+| add RA, BASE, RA, lsl #3
+| br_auth TMP0
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| str PC, [BASE, FRAME_PC]
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to check the TValue type and extract the GCobj. Branch on failure.
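+|// A tagged 64 bit value keeps its type code in the upper 17 bits, so
+|// asr #47 yields the sign-extended itype for the cmn comparison, while the
+|// low 47 bits (LJ_GCVMASK) hold the GCobj pointer.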
+|.macro checktp, reg, tp, target
+| asr ITYPE, reg, #47
+| cmn ITYPE, #-tp
+| and reg, reg, #LJ_GCVMASK
+| bne target
+|.endmacro
+|.macro checktp, dst, reg, tp, target
+| asr ITYPE, reg, #47
+| cmn ITYPE, #-tp
+| and dst, reg, #LJ_GCVMASK
+| bne target
+|.endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro
+|.macro checkint, reg, target
+| cmp TISNUMhi, reg, lsr #32
+| bne target
+|.endmacro
+|.macro checknum, reg, target
+| cmp TISNUMhi, reg, lsr #32
+| bls target
+|.endmacro
+|.macro checknumber, reg, target
+| cmp TISNUMhi, reg, lsr #32
+| blo target
+|.endmacro
+|
+|.macro init_constants
+| movn TISNIL, #0
+| movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
+| movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
+|.endmacro
+|
+|.macro mov_false, reg; movn reg, #0x8000, lsl #32; .endmacro
+|.macro mov_true, reg; movn reg, #0x0001, lsl #48; .endmacro
+|.macro mov_nil, reg; mov reg, TISNIL; .endmacro
+|.macro cmp_nil, reg; cmp reg, TISNIL; .endmacro
+|.macro add_TISNUM, dst, src; add dst, src, TISNUM; .endmacro
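+|// Resulting tagged patterns: nil = 0xffff_ffff_ffff_ffff,
+|// false = 0xffff_7fff_ffff_ffff, true = 0xfffe_ffff_ffff_ffff. Integers are
+|// tagged by adding TISNUM (0xffff_0000_0000_0000) to the zero-extended
+|// 32 bit value, so their high word equals TISNUMhi (0xffff0000).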
+|
+#define GL_J(field) (GG_G2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
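+|// The hot counters are an array of 16 bit slots in GG_State, reached via
+|// GG_G2DISP+GG_DISP2HOT; the byte offset computed below is
+|// ((PC >> 2) & 63) * 2, i.e. a simple hash of the bytecode address.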
+|.macro hotcheck, delta
+| lsr CARG1, PC, #1
+| and CARG1, CARG1, #126
+| add CARG1, CARG1, #GG_G2DISP+GG_DISP2HOT
+| ldrh CARG2w, [GL, CARG1]
+| subs CARG2, CARG2, #delta
+| strh CARG2w, [GL, CARG1]
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP
+| blo ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL
+| blo ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state.
+|.macro mv_vmstate, reg, st; movn reg, #LJ_VMST_..st; .endmacro
+|.macro st_vmstate, reg; str reg, GL->vmstate; .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
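+|// The table is turned gray again and linked into gc.grayagain through its
+|// gclist, so the GC rescans it instead of leaving a black object pointing
+|// to a white one.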
+|.macro barrierback, tab, mark, tmp
+| ldr tmp, GL->gc.grayagain
+| and mark, mark, #~LJ_GC_BLACK // black2gray(tab)
+| str tab, GL->gc.grayagain
+| strb mark, tab->marked
+| str tmp, tab->gclist
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+#if !LJ_DUALNUM
+#error "Only dual-number mode supported for ARM64 target"
+#endif
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: RB = previous base.
+ | tbz PC, #2, ->cont_dispatch // (PC & FRAME_P) == 0?
+ |
+ | // Return from pcall or xpcall fast func.
+ | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
+ | mov_true TMP0
+ | mov BASE, RB
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | str TMP0, [RA, #-8]! // Prepend true to results.
+ |
+ |->vm_returnc:
+ | adds RC, RC, #8 // RC = (nresults+1)*8.
+ | mov CRET1, #LUA_YIELD
+ | beq ->vm_unwind_c_eh
+ | str RCw, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
+ | // CARG1 = PC & FRAME_TYPE
+ | and RB, PC, #~FRAME_TYPEP
+ | cmp CARG1, #FRAME_C
+ | sub RB, BASE, RB // RB = previous base.
+ | bne ->vm_returnp
+ |
+ | str RB, L->base
+ | ldrsw CARG2, SAVE_NRES // CARG2 = nresults+1.
+ | mv_vmstate TMP0w, C
+ | sub BASE, BASE, #16
+ | subs TMP2, RC, #8
+ | st_vmstate TMP0w
+ | beq >2
+ |1:
+ | subs TMP2, TMP2, #8
+ | ldr TMP0, [RA], #8
+ | str TMP0, [BASE], #8
+ | bne <1
+ |2:
+ | cmp RC, CARG2, lsl #3 // More/less results wanted?
+ | bne >6
+ |3:
+ | str BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ldr RC, SAVE_CFRAME // Restore previous C frame.
+ | mov CRET1, #0 // Ok return status for vm_pcall.
+ | str RC, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | ret_auth
+ |
+ |6:
+ | bgt >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | ldr CARG3, L->maxstack
+ | cmp BASE, CARG3
+ | bhs >8
+ | str TISNIL, [BASE], #8
+ | add RC, RC, #8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | cbz CARG2, <3 // LUA_MULTRET+1 case?
+ | sub CARG1, RC, CARG2, lsl #3
+ | sub BASE, BASE, CARG1 // Shrink top.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | str BASE, L->top // Save current top held in BASE (yes).
+ | mov CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->top // Need the (realloced) L->top in BASE.
+ | ldrsw CARG2, SAVE_NRES
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | add fp, CARG1, # SAVE_FP_LR_
+ | mov sp, CARG1
+ | mov CRET1, CARG2
+ | ldr L, SAVE_L
+ | ldr GL, L->glref
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | mv_vmstate TMP0w, C
+ | st_vmstate TMP0w
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | add fp, CARG1, # SAVE_FP_LR_
+ | mov sp, CARG1
+ | ldr L, SAVE_L
+ | init_constants
+ | ldr GL, L->glref // Setup pointer to global state.
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | mov RC, #16 // 2 results: false + error message.
+ | ldr BASE, L->base
+ | mov_false TMP0
+ | sub RA, BASE, #8 // Results start at BASE-8.
+ | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
+ | str TMP0, [BASE, #-8] // Prepend false to error message.
+ | st_vmstate ST_INTERP
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | // CARG1 = L
+ | mov CARG2, #LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | mov CARG1, L
+ | stp BASE, RC, L->base
+ | add PC, PC, #4 // Must point after first instruction.
+ | lsr CARG2, RA, #3
+ |2:
+ | // L->base = new base, L->top = top
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldp BASE, RC, L->base
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, RC, BASE
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L, CARG1
+ | ldr GL, L->glref // Setup pointer to global state.
+ | mov BASE, CARG2
+ | str L, SAVE_L
+ | mov PC, #FRAME_CP
+ | str wzr, SAVE_NRES
+ | add TMP0, sp, #CFRAME_RESUME
+ | ldrb TMP1w, L->status
+ | str wzr, SAVE_ERRF
+ | str L, SAVE_PC // Any value outside of bytecode is ok.
+ | str xzr, SAVE_CFRAME
+ | str TMP0, L->cframe
+ | cbz TMP1w, >3
+ |
+ | // Resume after yield (like a return).
+ | str L, GL->cur_L
+ | mov RA, BASE
+ | ldp BASE, CARG1, L->base
+ | init_constants
+ | ldr PC, [BASE, FRAME_PC]
+ | strb wzr, L->status
+ | sub RC, CARG1, BASE
+ | ands CARG1, PC, #FRAME_TYPE
+ | add RC, RC, #8
+ | st_vmstate ST_INTERP
+ | str RCw, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, #FRAME_CP
+ | str CARG4w, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, #FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ldr RC, L:CARG1->cframe
+ | str CARG3w, SAVE_NRES
+ | mov L, CARG1
+ | str CARG1, SAVE_L
+ | ldr GL, L->glref // Setup pointer to global state.
+ | mov BASE, CARG2
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | add TMP0, sp, #0
+ | str RC, SAVE_CFRAME
+ | str TMP0, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | str L, GL->cur_L
+ | ldp RB, CARG1, L->base // RB = old base (for vmeta_call).
+ | add PC, PC, BASE
+ | init_constants
+ | sub PC, PC, RB // PC = frame delta + frame type
+ | sub NARGS8:RC, CARG1, BASE
+ | st_vmstate ST_INTERP
+ |
+ |->vm_call_dispatch:
+ | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | checkfunc CARG3, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L, CARG1
+ | ldr RA, L:CARG1->stack
+ | str CARG1, SAVE_L
+ | ldr GL, L->glref // Setup pointer to global state.
+ | ldr RB, L->top
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | ldr RC, L->cframe
+ | sub RA, RA, RB // Compute -savestack(L, L->top).
+ | str RAw, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | str wzr, SAVE_ERRF // No error function.
+ | add TMP0, sp, #0
+ | str RC, SAVE_CFRAME
+ | str TMP0, L->cframe // Add our C frame to cframe chain.
+ | str L, GL->cur_L
+ | blr_auth CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | mov BASE, CRET1
+ | mov PC, #FRAME_CP
+ | cbnz BASE, <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
+ | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
+ | ldr CARG1, [BASE, #-32] // Get continuation.
+ | mov CARG4, BASE
+ | mov BASE, RB // Restore caller BASE.
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ |.if FFI
+ | cmp CARG1, #1
+ |.endif
+ | ldr PC, [CARG4, #-24] // Restore PC from [cont|PC].
+ | add TMP0, RA, RC
+ | str TISNIL, [TMP0, #-8] // Ensure one valid arg.
+ |.if FFI
+ | bls >1
+ |.endif
+ | ldr CARG3, LFUNC:CARG3->pc
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | // BASE = base, RA = resultptr, CARG4 = meta base
+ | br_auth CARG1
+ |
+ |.if FFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | sub CARG4, CARG4, #32
+ | sub RC, CARG4, BASE
+ | b ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, CARG4 = meta base
+ | ldr INSw, [PC, #-4]
+ | sub CARG2, CARG4, #32
+ | ldr TMP0, [RA]
+ | str BASE, L->base
+ | decode_RB RB, INS
+ | decode_RA RA, INS
+ | add TMP1, BASE, RB, lsl #3
+ | subs TMP1, CARG2, TMP1
+ | beq >1
+ | str TMP0, [CARG2]
+ | lsr CARG3, TMP1, #3
+ | b ->BC_CAT_Z
+ |
+ |1:
+ | str TMP0, [BASE, RA, lsl #3]
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | movn CARG4, #~LJ_TSTR
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG4, STR:RC, CARG4, lsl #47
+ | b >2
+ |
+ |->vmeta_tgets:
+ | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | str CARG2, GL->tmptv
+ | add CARG2, GL, #offsetof(global_State, tmptv)
+ |2:
+ | add CARG3, sp, TMPDofs
+ | str CARG4, TMPD
+ | b >1
+ |
+ |->vmeta_tgetb: // RB = table, RC = index
+ | add_TISNUM RC, RC
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, sp, TMPDofs
+ | str RC, TMPD
+ | b >1
+ |
+ |->vmeta_tgetv: // RB = table, RC = key
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, BASE, RC, lsl #3
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cbz CRET1, >3
+ | ldr TMP0, [CRET1]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | sub TMP1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #16 // 2 args for func(t, k).
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | str PC, [BASE, #-24] // [cont|PC]
+ | sub PC, BASE, TMP1
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | sxtw CARG2, TMP1w
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | mov_nil TMP0
+ | cbz CRET1, ->BC_TGETR_Z
+ | ldr TMP0, [CRET1]
+ | b ->BC_TGETR_Z
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | movn CARG4, #~LJ_TSTR
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG4, STR:RC, CARG4, lsl #47
+ | b >2
+ |
+ |->vmeta_tsets:
+ | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | str CARG2, GL->tmptv
+ | add CARG2, GL, #offsetof(global_State, tmptv)
+ |2:
+ | add CARG3, sp, TMPDofs
+ | str CARG4, TMPD
+ | b >1
+ |
+ |->vmeta_tsetb: // RB = table, RC = index
+ | add_TISNUM RC, RC
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, sp, TMPDofs
+ | str RC, TMPD
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, BASE, RC, lsl #3
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | cbz CRET1, >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | str TMP0, [CRET1]
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | sub TMP1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | str TMP0, [BASE, #16] // Copy value to third argument.
+ | str PC, [BASE, #-24] // [cont|PC]
+ | sub PC, BASE, TMP1
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ | sxtw CARG3, TMP1w
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // Returns TValue *.
+ | b ->BC_TSETR_Z
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | add CARG2, BASE, RA, lsl #3
+ | sub PC, PC, #4
+ | add CARG3, BASE, RC, lsl #3
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | uxtb CARG4w, INSw
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmp CRET1, #1
+ | bhi ->vmeta_binop
+ |4:
+ | ldrh RBw, [PC, # OFS_RD]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
+ | csel PC, PC, RB, lo
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | ldr INSw, [PC, #-4]
+ | ldr TMP0, [RA]
+ | decode_RA TMP1, INS
+ | str TMP0, [BASE, TMP1, lsl #3]
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | ldr TMP0, [RA]
+ | mov_true TMP1
+ | cmp TMP1, TMP0 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | ldr TMP0, [RA]
+ | mov_false TMP1
+ | cmp TMP0, TMP1 // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | and TAB:CARG3, CARG3, #LJ_GCVMASK
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, INS
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |.endif
+ |
+ |->vmeta_istype:
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, RA
+ | mov CARG3, RC
+ | str PC, SAVE_PC
+ | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | b ->cont_nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB, lsl #3
+ | add CARG4, KBASE, RC, lsl #3
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | add CARG4, BASE, RB, lsl #3
+ | add CARG3, KBASE, RC, lsl #3
+ | b >1
+ |
+ |->vmeta_unm:
+ | add CARG3, BASE, RC, lsl #3
+ | mov CARG4, CARG3
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB, lsl #3
+ | add CARG4, BASE, RC, lsl #3
+ |1:
+ | uxtb CARG5w, INSw
+ | add CARG2, BASE, RA, lsl #3
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cbz CRET1, ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | str PC, [CRET1, #-24] // [cont|PC]
+ | add PC, TMP1, #FRAME_CONT
+ | mov BASE, CRET1
+ | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | add CARG2, BASE, RC, lsl #3
+#if LJ_52
+ | mov TAB:RC, TAB:CARG1 // Save table (ignored for other types).
+#endif
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | cbnz CRET1, ->vmeta_binop // Binop call for compatibility.
+ | mov TAB:CARG1, TAB:RC
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // RB = old base, BASE = new base, RC = nargs*8
+ | mov CARG1, L
+ | str RB, L->base // This is the callers base!
+ | sub CARG2, BASE, #16
+ | str PC, SAVE_PC
+ | add CARG3, BASE, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub CARG2, RA, #16
+ | str PC, SAVE_PC
+ | add CARG3, RA, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | ldr TMP1, [RA, FRAME_FUNC] // Guaranteed to be a function here.
+ | ldr PC, [BASE, FRAME_PC]
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | and LFUNC:CARG3, TMP1, #LJ_GCVMASK
+ | b ->BC_CALLT2_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, RA
+ | str PC, SAVE_PC
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ | ldr INSw, [PC, #-4]
+ |.if JIT
+ | uxtb TMP0w, INSw
+ |.endif
+ | decode_RA RA, INS
+ | decode_RD RC, INS
+ |.if JIT
+ | cmp TMP0, #BC_JFORI
+ | beq =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | ldp CARG1, CARG2, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc name
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | ldr FARG1, [BASE]
+ | blo ->fff_fallback
+ | checknum CARG1, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc name
+ | ldp CARG1, CARG2, [BASE]
+ | cmp NARGS8:RC, #16
+ | ldp FARG1, FARG2, [BASE]
+ | blo ->fff_fallback
+ | checknum CARG1, ->fff_fallback
+ | checknum CARG2, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
+ |.macro ffgccheck
+ | ldp CARG1, CARG2, GL->gc.total // Assumes threshold follows total.
+ | cmp CARG1, CARG2
+ | blt >1
+ | bl ->fff_gcstep
+ |1:
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | ldr PC, [BASE, FRAME_PC]
+ | mov_false TMP1
+ | cmp CARG1, TMP1
+ | bhs ->fff_fallback
+ | str CARG1, [BASE, #-16]
+ | sub RB, BASE, #8
+ | subs RA, NARGS8:RC, #8
+ | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
+ | cbz RA, ->fff_res // Done if exactly 1 argument.
+ |1:
+ | ldr CARG1, [RB, #16]
+ | sub RA, RA, #8
+ | str CARG1, [RB], #8
+ | cbnz RA, <1
+ | b ->fff_res
+ |
+ |.ffunc_1 type
+ | mov TMP0, #~LJ_TISNUM
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #~LJ_TISNUM
+ | csinv TMP1, TMP0, ITYPE, lo
+ | add TMP1, TMP1, #offsetof(GCfuncC, upvalue)/8
+ | ldr CARG1, [CFUNC:CARG3, TMP1, lsl #3]
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TTAB
+ | ccmn ITYPE, #-LJ_TUDATA, #4, ne
+ | and TAB:CARG1, CARG1, #LJ_GCVMASK
+ | bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RB, TAB:CARG1->metatable
+ |2:
+ | mov_nil CARG1
+ | ldr STR:RC, GL->gcroot[GCROOT_MMNAME+MM_metatable]
+ | cbz TAB:RB, ->fff_restv
+ | ldr TMP1w, TAB:RB->hmask
+ | ldr TMP2w, STR:RC->sid
+ | ldr NODE:CARG3, TAB:RB->node
+ | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
+ | add TMP1, TMP1, TMP1, lsl #1
+ | movn CARG4, #~LJ_TSTR
+ | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
+ | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ldp CARG1, TMP0, NODE:CARG3->val
+ | ldr NODE:CARG3, NODE:CARG3->next
+ | cmp TMP0, CARG4
+ | beq >5
+ | cbnz NODE:CARG3, <3
+ |4:
+ | mov CARG1, RB // Use metatable as default result.
+ | movk CARG1, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | b ->fff_restv
+ |5:
+ | cmp_nil TMP0
+ | bne ->fff_restv
+ | b <4
+ |
+ |6:
+ | movn TMP0, #~LJ_TISNUM
+ | cmp ITYPE, TMP0
+ | csel ITYPE, ITYPE, TMP0, hs
+ | sub TMP1, GL, ITYPE, lsl #3
+ | ldr TAB:RB, [TMP1, #offsetof(global_State, gcroot[GCROOT_BASEMT])-8]
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
+ | ldr TAB:TMP0, TAB:TMP1->metatable
+ | asr ITYPE, CARG2, #47
+ | ldrb TMP2w, TAB:TMP1->marked
+ | cmn ITYPE, #-LJ_TTAB
+ | and TAB:CARG2, CARG2, #LJ_GCVMASK
+ | ccmp TAB:TMP0, #0, #0, eq
+ | bne ->fff_fallback
+ | str TAB:CARG2, TAB:TMP1->metatable
+ | tbz TMP2w, #2, ->fff_restv // isblack(table)
+ | barrierback TAB:TMP1, TMP2w, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | ldr CARG2, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | checktab CARG2, ->fff_fallback
+ | mov CARG1, L
+ | add CARG3, BASE, #8
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | ldr CARG1, [CRET1]
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | bne ->fff_fallback
+ | checknumber CARG1, ->fff_fallback
+ | b ->fff_restv
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv
+ | // Handle numbers inline, unless a number base metatable is present.
+ | ldr TMP1, GL->gcroot[GCROOT_BASEMT_NUM]
+ | str BASE, L->base
+ | cmn ITYPE, #-LJ_TISNUM
+ | ccmp TMP1, #0, #0, ls
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | bne ->fff_fallback
+ | ffgccheck
+ | mov CARG1, L
+ | mov CARG2, BASE
+ | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
+ | // Returns GCstr *.
+ | movn TMP1, #~LJ_TSTR
+ | ldr BASE, L->base
+ | add CARG1, CARG1, TMP1, lsl #47
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | checktp CARG1, LJ_TTAB, ->fff_fallback
+ | str TISNIL, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
+ | ldr PC, [BASE, FRAME_PC]
+ | add CARG2, BASE, #8
+ | sub CARG3, BASE, #16
+ | bl extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // Returns 1=found, 0=end, -1=error.
+ | mov RC, #(2+1)*8
+ | tbnz CRET1w, #31, ->fff_fallback // Invalid key.
+ | cbnz CRET1, ->fff_res // Found key/value.
+ | // End of traversal: return nil.
+ | str TISNIL, [BASE, #-16]
+ | b ->fff_res1
+ |
+ |.ffunc_1 pairs
+ | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
+#if LJ_52
+ | ldr TAB:CARG2, TAB:TMP1->metatable
+#endif
+ | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cbnz TAB:CARG2, ->fff_fallback
+#endif
+ | mov RC, #(3+1)*8
+ | stp CFUNC:CARG4, CARG1, [BASE, #-16]
+ | str TISNIL, [BASE]
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktab CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ | ldr TMP1w, TAB:CARG1->asize
+ | ldr CARG3, TAB:CARG1->array
+ | ldr TMP0w, TAB:CARG1->hmask
+ | add CARG2w, CARG2w, #1
+ | cmp CARG2w, TMP1w
+ | ldr PC, [BASE, FRAME_PC]
+ | add_TISNUM TMP2, CARG2
+ | mov RC, #(0+1)*8
+ | str TMP2, [BASE, #-16]
+ | bhs >2 // Not in array part?
+ | ldr TMP0, [CARG3, CARG2, lsl #3]
+ |1:
+ | mov TMP1, #(2+1)*8
+ | cmp_nil TMP0
+ | str TMP0, [BASE, #-8]
+ | csel RC, RC, TMP1, eq
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | cbz TMP0w, ->fff_res
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cbz CRET1, ->fff_res
+ | ldr TMP0, [CRET1]
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
+#if LJ_52
+ | ldr TAB:CARG2, TAB:TMP1->metatable
+#endif
+ | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cbnz TAB:CARG2, ->fff_fallback
+#endif
+ | mov RC, #(3+1)*8
+ | stp CFUNC:CARG4, CARG1, [BASE, #-16]
+ | str TISNUM, [BASE]
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | ldr TMP1, L->maxstack
+ | add TMP2, BASE, NARGS8:RC
+ | cmp TMP1, TMP2
+ | blo ->fff_fallback
+ | cmp NARGS8:RC, #8
+ | ldrb TMP0w, GL->hookmask
+ | blo ->fff_fallback
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | mov RB, BASE
+ | add BASE, BASE, #16
+ | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1
+ | add PC, TMP0, #16+FRAME_PCALL
+ | beq ->vm_call_dispatch
+ |1:
+ | add TMP2, BASE, NARGS8:RC
+ |2:
+ | ldr TMP0, [TMP2, #-16]
+ | str TMP0, [TMP2, #-8]!
+ | cmp TMP2, BASE
+ | bne <2
+ | b ->vm_call_dispatch
+ |
+ |.ffunc xpcall
+ | ldr TMP1, L->maxstack
+ | add TMP2, BASE, NARGS8:RC
+ | cmp TMP1, TMP2
+ | blo ->fff_fallback
+ | ldp CARG1, CARG2, [BASE]
+ | ldrb TMP0w, GL->hookmask
+ | subs NARGS8:TMP1, NARGS8:RC, #16
+ | blo ->fff_fallback
+ | mov RB, BASE
+ | asr ITYPE, CARG2, #47
+ | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1
+ | cmn ITYPE, #-LJ_TFUNC
+ | add PC, TMP0, #24+FRAME_PCALL
+ | bne ->fff_fallback // Traceback must be a function.
+ | mov NARGS8:RC, NARGS8:TMP1
+ | add BASE, BASE, #24
+ | stp CARG2, CARG1, [RB] // Swap function and traceback.
+ | cbz NARGS8:RC, ->vm_call_dispatch
+ | b <1
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG1, LJ_TTHREAD, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr
+ | and L:CARG1, CARG1, #LJ_GCVMASK
+ |.endif
+ | ldr PC, [BASE, FRAME_PC]
+ | str BASE, L->base
+ | ldp RB, CARG2, L:CARG1->base
+ | ldrb TMP1w, L:CARG1->status
+ | add TMP0, CARG2, TMP1
+ | str PC, SAVE_PC
+ | cmp TMP0, RB
+ | beq ->fff_fallback
+ | cmp TMP1, #LUA_YIELD
+ | add TMP0, CARG2, #8
+ | csel CARG2, CARG2, TMP0, hs
+ | ldr CARG4, L:CARG1->maxstack
+ | add CARG3, CARG2, NARGS8:RC
+ | ldr RB, L:CARG1->cframe
+ | ccmp CARG3, CARG4, #2, ls
+ | ccmp RB, #0, #2, ls
+ | bhi ->fff_fallback
+ |.if resume
+ | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
+ | add BASE, BASE, #8
+ | sub NARGS8:RC, NARGS8:RC, #8
+ |.endif
+ | str CARG3, L:CARG1->top
+ | str BASE, L->top
+ | cbz NARGS8:RC, >3
+ |2: // Move args to coroutine.
+ | ldr TMP0, [BASE, RB]
+ | cmp RB, NARGS8:RC
+ | str TMP0, [CARG2, RB]
+ | add RB, RB, #8
+ | bne <2
+ |3:
+ | mov CARG3, #0
+ | mov L:RA, L:CARG1
+ | mov CARG4, #0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | ldp CARG3, CARG4, L:RA->base
+ | cmp CRET1, #LUA_YIELD
+ | ldr BASE, L->base
+ | str L, GL->cur_L
+ | st_vmstate ST_INTERP
+ | bhi >8
+ | sub RC, CARG4, CARG3
+ | ldr CARG1, L->maxstack
+ | add CARG2, BASE, RC
+ | cbz RC, >6 // No results?
+ | cmp CARG2, CARG1
+ | mov RB, #0
+ | bhi >9 // Need to grow stack?
+ |
+ | sub CARG4, RC, #8
+ | str CARG3, L:RA->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | ldr TMP0, [CARG3, RB]
+ | cmp RB, CARG4
+ | str TMP0, [BASE, RB]
+ | add RB, RB, #8
+ | bne <5
+ |6:
+ |.if resume
+ | mov_true TMP1
+ | add RC, RC, #16
+ |7:
+ | str TMP1, [BASE, #-8] // Prepend true/false to results.
+ | sub RA, BASE, #8
+ |.else
+ | mov RA, BASE
+ | add RC, RC, #8
+ |.endif
+ | ands CARG1, PC, #FRAME_TYPE
+ | str PC, SAVE_PC
+ | str RCw, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | ldr TMP0, [CARG4, #-8]!
+ | mov_false TMP1
+ | mov RC, #(2+1)*8
+ | str CARG4, L:RA->top // Remove error from coroutine stack.
+ | str TMP0, [BASE] // Copy error message.
+ | b <7
+ |.else
+ | mov CARG1, L
+ | mov CARG2, L:RA
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Never returns.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov CARG1, L
+ | lsr CARG2, RC, #3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | mov CRET1, #0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | ldr TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | mov CRET1, #LUA_YIELD
+ | stp BASE, TMP1, L->base
+ | tbz TMP0, #0, ->fff_fallback
+ | str xzr, L->cframe
+ | strb CRET1w, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.macro math_round, func, round
+ | .ffunc math_ .. func
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | ldr d0, [BASE]
+ | blo ->fff_fallback
+ | cmp TISNUMhi, CARG1, lsr #32
+ | beq ->fff_restv
+ | blo ->fff_fallback
+ | round d0, d0
+ | b ->fff_resn
+ |.endmacro
+ |
+ | math_round floor, frintm
+ | math_round ceil, frintp
+ |
+ |.ffunc_1 math_abs
+ | checknumber CARG1, ->fff_fallback
+ | and CARG1, CARG1, #U64x(7fffffff,ffffffff)
+ | bne ->fff_restv
+ | eor CARG2w, CARG1w, CARG1w, asr #31
+ | movz CARG3, #0x41e0, lsl #48 // 2^31.
+ | subs CARG1w, CARG2w, CARG1w, asr #31
+ | add_TISNUM CARG1, CARG1
+ | csel CARG1, CARG1, CARG3, pl
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG1 = TValue result.
+ | ldr PC, [BASE, FRAME_PC]
+ | str CARG1, [BASE, #-16]
+ |->fff_res1:
+ | // PC = return.
+ | mov RC, #(1+1)*8
+ |->fff_res:
+ | // RC = (nresults+1)*8, PC = return.
+ | ands CARG1, PC, #FRAME_TYPE
+ | str RCw, SAVE_MULTRES
+ | sub RA, BASE, #16
+ | bne ->vm_return
+ | ldr INSw, [PC, #-4]
+ | decode_RB RB, INS
+ |5:
+ | cmp RC, RB, lsl #3 // More results expected?
+ | blo >6
+ | decode_RA TMP1, INS
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP1, lsl #3
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | add TMP1, RA, RC
+ | add RC, RC, #8
+ | str TISNIL, [TMP1, #-8]
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.ffunc_n math_sqrt
+ | fsqrt d0, d0
+ |->fff_resn:
+ | ldr PC, [BASE, FRAME_PC]
+ | str d0, [BASE, #-16]
+ | b ->fff_res1
+ |
+ |.ffunc math_log
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | ldr FARG1, [BASE]
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checknum CARG1, ->fff_fallback
+ | bl extern log
+ | b ->fff_resn
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_2 math_ldexp
+ | ldr FARG1, [BASE]
+ | checknum CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ | sxtw CARG1, CARG2w
+ | bl extern ldexp // (double x, int exp)
+ | b ->fff_resn
+ |
+ |.ffunc_n math_frexp
+ | add CARG1, sp, TMPDofs
+ | bl extern frexp
+ | ldr CARG2w, TMPD
+ | ldr PC, [BASE, FRAME_PC]
+ | str d0, [BASE, #-16]
+ | mov RC, #(2+1)*8
+ | add_TISNUM CARG2, CARG2
+ | str CARG2, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.ffunc_n math_modf
+ | sub CARG1, BASE, #16
+ | ldr PC, [BASE, FRAME_PC]
+ | bl extern modf
+ | mov RC, #(2+1)*8
+ | str d0, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, cond, fcond
+ | .ffunc_1 name
+ | add RB, BASE, RC
+ | add RA, BASE, #8
+ | checkint CARG1, >4
+ |1: // Handle integers.
+ | ldr CARG2, [RA]
+ | cmp RA, RB
+ | bhs ->fff_restv
+ | checkint CARG2, >3
+ | cmp CARG1w, CARG2w
+ | add RA, RA, #8
+ | csel CARG1, CARG2, CARG1, cond
+ | b <1
+ |3: // Convert intermediate result to number and continue below.
+ | scvtf d0, CARG1w
+ | blo ->fff_fallback
+ | ldr d1, [RA]
+ | b >6
+ |
+ |4:
+ | ldr d0, [BASE]
+ | blo ->fff_fallback
+ |5: // Handle numbers.
+ | ldr CARG2, [RA]
+ | ldr d1, [RA]
+ | cmp RA, RB
+ | bhs ->fff_resn
+ | checknum CARG2, >7
+ |6:
+ | fcmp d0, d1
+ | add RA, RA, #8
+ | fcsel d0, d1, d0, fcond
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | scvtf d1, CARG2w
+ | blo ->fff_fallback
+ | b <6
+ |.endmacro
+ |
+ | math_minmax math_min, gt, pl
+ | math_minmax math_max, lt, le
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ldp PC, CARG1, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8
+ | asr ITYPE, CARG1, #47
+ | ccmn ITYPE, #-LJ_TSTR, #0, eq
+ | and STR:CARG1, CARG1, #LJ_GCVMASK
+ | bne ->fff_fallback
+ | ldrb TMP0w, STR:CARG1[1] // Access is always ok (NUL at end).
+ | ldr CARG3w, STR:CARG1->len
+ | add_TISNUM TMP0, TMP0
+ | str TMP0, [BASE, #-16]
+ | mov RC, #(0+1)*8
+ | cbz CARG3, ->fff_res
+ | b ->fff_res1
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | ldp PC, CARG1, [BASE, FRAME_PC]
+ | cmp CARG1w, #255
+ | ccmp NARGS8:RC, #8, #0, ls // Need exactly 1 argument.
+ | bne ->fff_fallback
+ | checkint CARG1, ->fff_fallback
+ | mov CARG3, #1
+ | // Point to the char inside the integer in the stack slot.
+ |.if ENDIAN_LE
+ | mov CARG2, BASE
+ |.else
+ | add CARG2, BASE, #7
+ |.endif
+ |->fff_newstr:
+ | // CARG2 = str, CARG3 = len.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | movn TMP1, #~LJ_TSTR
+ | add CARG1, CARG1, TMP1, lsl #47
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | ldr CARG1, [BASE]
+ | ldr CARG3, [BASE, #16]
+ | cmp NARGS8:RC, #16
+ | movn RB, #0
+ | beq >1
+ | blo ->fff_fallback
+ | checkint CARG3, ->fff_fallback
+ | sxtw RB, CARG3w
+ |1:
+ | ldr CARG2, [BASE, #8]
+ | checkstr CARG1, ->fff_fallback
+ | ldr TMP1w, STR:CARG1->len
+ | checkint CARG2, ->fff_fallback
+ | sxtw CARG2, CARG2w
+ | // CARG1 = str, TMP1 = str->len, CARG2 = start, RB = end
+ | add TMP2, RB, TMP1
+ | cmp RB, #0
+ | add TMP0, CARG2, TMP1
+ | csinc RB, RB, TMP2, ge // if (end < 0) end += len+1
+ | cmp CARG2, #0
+ | csinc CARG2, CARG2, TMP0, ge // if (start < 0) start += len+1
+ | cmp RB, #0
+ | csel RB, RB, xzr, ge // if (end < 0) end = 0
+ | cmp CARG2, #1
+ | csinc CARG2, CARG2, xzr, ge // if (start < 1) start = 1
+ | cmp RB, TMP1
+ | csel RB, RB, TMP1, le // if (end > len) end = len
+ | add CARG1, STR:CARG1, #sizeof(GCstr)-1
+ | subs CARG3, RB, CARG2 // len = end - start
+ | add CARG2, CARG1, CARG2
+ | add CARG3, CARG3, #1 // len += 1
+ | bge ->fff_newstr
+ | add STR:CARG1, GL, #offsetof(global_State, strempty)
+ | movn TMP1, #~LJ_TSTR
+ | add CARG1, CARG1, TMP1, lsl #47
+ | b ->fff_restv
+ |
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ | ldr CARG2, [BASE]
+ | cmp NARGS8:RC, #8
+ | asr ITYPE, CARG2, #47
+ | ccmn ITYPE, #-LJ_TSTR, #0, hs
+ | and STR:CARG2, CARG2, #LJ_GCVMASK
+ | bne ->fff_fallback
+ | ldr TMP0, GL->tmpbuf.b
+ | add SBUF:CARG1, GL, #offsetof(global_State, tmpbuf)
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | str L, GL->tmpbuf.L
+ | str TMP0, GL->tmpbuf.w
+ | bl extern lj_buf_putstr_ .. name
+ | bl extern lj_buf_tostr
+ | b ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |// FP number to bit conversion for soft-float. Clobbers CARG1-CARG3
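+ |// CARG1 holds the raw IEEE-754 bits: doubling drops the sign, the biased
+ |// exponent then sits in bits 53-63, and 1076-exponent is the right shift
+ |// that aligns implicit bit + mantissa with the integer's low bits.
+ |// Out-of-range shifts (tiny or huge magnitudes) yield 0; the sign is
+ |// reapplied with cneg.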
+ |->vm_tobit_fb:
+ | bls ->fff_fallback
+ | add CARG2, CARG1, CARG1
+ | mov CARG3, #1076
+ | sub CARG3, CARG3, CARG2, lsr #53
+ | cmp CARG3, #53
+ | bhi >1
+ | and CARG2, CARG2, #U64x(001fffff,ffffffff)
+ | orr CARG2, CARG2, #U64x(00200000,00000000)
+ | cmp CARG1, #0
+ | lsr CARG2, CARG2, CARG3
+ | cneg CARG1w, CARG2w, mi
+ | br lr
+ |1:
+ | mov CARG1w, #0
+ | br lr
+ |
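+ |// The bit.* fast functions set lr to a local label before checkint, so
+ |// ->vm_tobit_fb can hand back the converted argument with a plain br lr.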
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | adr lr, >1
+ | checkint CARG1, ->vm_tobit_fb
+ |1:
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | mov RA, #8
+ | mov TMP0w, CARG1w
+ | adr lr, >2
+ |1:
+ | ldr CARG1, [BASE, RA]
+ | cmp RA, NARGS8:RC
+ | add RA, RA, #8
+ | bge >9
+ | checkint CARG1, ->vm_tobit_fb
+ |2:
+ | ins TMP0w, TMP0w, CARG1w
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, orr
+ |.ffunc_bit_op bxor, eor
+ |
+ |.ffunc_bit tobit
+ | mov TMP0w, CARG1w
+ |9: // Label reused by .ffunc_bit_op users.
+ | add_TISNUM CARG1, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc_bit bswap
+ | rev TMP0w, CARG1w
+ | add_TISNUM CARG1, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc_bit bnot
+ | mvn TMP0w, CARG1w
+ | add_TISNUM CARG1, TMP0
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc bit_..name
+ | ldp TMP0, CARG1, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | adr lr, >1
+ | checkint CARG1, ->vm_tobit_fb
+ |1:
+ |.if shmod == 0
+ | mov TMP1, CARG1
+ |.else
+ | neg TMP1, CARG1
+ |.endif
+ | mov CARG1, TMP0
+ | adr lr, >2
+ | checkint CARG1, ->vm_tobit_fb
+ |2:
+ | ins TMP0w, CARG1w, TMP1w
+ | add_TISNUM CARG1, TMP0
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, lsl, 0
+ |.ffunc_bit_sh rshift, lsr, 0
+ |.ffunc_bit_sh arshift, asr, 0
+ |.ffunc_bit_sh rol, ror, 1
+ |.ffunc_bit_sh ror, ror, 0
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RC = nargs*8
+ | ldp CFUNC:CARG3, PC, [BASE, FRAME_FUNC] // Fallback may overwrite PC.
+ | ldr TMP2, L->maxstack
+ | add TMP1, BASE, NARGS8:RC
+ | stp BASE, TMP1, L->base
+ | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | add TMP1, TMP1, #8*LUA_MINSTACK
+ | ldr CARG3, CFUNC:CARG3->f
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | cmp TMP1, TMP2
+ | mov CARG1, L
+ | bhi >5 // Need to grow stack.
+ | blr_auth CARG3 // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ldr BASE, L->base
+ | cmp CRET1w, #0
+ | lsl RC, CRET1, #3
+ | sub RA, BASE, #16
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | ldr CARG1, L->top
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, CARG1, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | ands TMP0, PC, #FRAME_TYPE
+ | and TMP1, PC, #~FRAME_TYPEP
+ | bne >3
+ | ldrb RAw, [PC, #-4+OFS_RA]
+ | lsl RA, RA, #3
+ | add TMP1, RA, #16
+ |3:
+ | sub RB, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2, #LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | cmp CARG1, CARG1 // Set zero-flag to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | sp_auth
+ | add CARG2, BASE, NARGS8:RC // Calculate L->top.
+ | mov RA, lr
+ | stp BASE, CARG2, L->base
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | mov CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | ldp BASE, CARG2, L->base
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | mov lr, RA // Help return address predictor.
+ | sub NARGS8:RC, CARG2, BASE // Calculate nargs*8.
+ | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ret_auth
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | ldrb CARG1w, GL->hookmask
+ | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | ldr CARG2w, GL->hookcount
+ | tst CARG1, #HOOK_ACTIVE
+ | bne >1
+ | sub CARG2w, CARG2w, #1
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq >1
+ | str CARG2w, GL->hookcount
+ | b >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | ldrb TMP2w, GL->hookmask
+ | tbz TMP2w, #HOOK_ACTIVE_SHIFT, >1 // Hook already active?
+ |5: // Re-dispatch to static ins.
+ | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC]
+ | br_auth TMP0
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | ldrb TMP2w, GL->hookmask
+ | ldr TMP3w, GL->hookcount
+ | tbnz TMP2w, #HOOK_ACTIVE_SHIFT, <5 // Hook already active?
+ | tst TMP2w, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq <5
+ | sub TMP3w, TMP3w, #1
+ | str TMP3w, GL->hookcount
+ | cbz TMP3w, >1
+ | tbz TMP2w, #LUA_HOOKLINE, <5
+ |1:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | ldr BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | ldr INSw, [PC, #-4]
+ | add TMP1, GL, INS, uxtb #3
+ | decode_RA RA, INS
+ | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC]
+ | decode_RD RC, INS
+ | br_auth TMP0
+ |
+ |->cont_hook: // Continue from hook yield.
+ | ldr CARG1, [CARG4, #-40]
+ | add PC, PC, #4
+ | str CARG1w, SAVE_MULTRES // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
+ | add CARG1, GL, #GG_G2DISP+GG_DISP2J
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | str PC, SAVE_PC
+ | ldr CARG3, LFUNC:CARG3->pc
+ | mov CARG2, PC
+ | str L, [GL, #GL_J(L)]
+ | ldrb CARG3w, [CARG3, #PC2PROTO(framesize)]
+ | str BASE, L->base
+ | add CARG3, BASE, CARG3, lsl #3
+ | str CARG3, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | orr CARG2, PC, #1
+ |1:
+ |.endif
+ | add TMP1, BASE, NARGS8:RC
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | sub RA, RA, BASE
+ | stp BASE, TMP1, L->base
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | ldp BASE, TMP1, L->base
+ | str xzr, SAVE_PC // Invalidate for subsequent line hook.
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | add RA, BASE, RA
+ | sub NARGS8:RC, TMP1, BASE
+ | ldr INSw, [PC, #-4]
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | br_auth CRET1
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, CARG4 = meta base
+ | ldr RBw, SAVE_MULTRES
+ | ldr INSw, [PC, #-4]
+ | ldr TRACE:CARG3, [CARG4, #-40] // Save previous trace.
+ | subs RB, RB, #8
+ | decode_RA RC, INS // Call base.
+ | and CARG3, CARG3, #LJ_GCVMASK
+ | beq >2
+ |1: // Move results down.
+ | ldr CARG1, [RA], #8
+ | subs RB, RB, #8
+ | str CARG1, [BASE, RC, lsl #3]
+ | add RC, RC, #1
+ | bne <1
+ |2:
+ | decode_RA RA, INS
+ | decode_RB RB, INS
+ | add RA, RA, RB
+ |3:
+ | cmp RA, RC
+ | bhi >9 // More results wanted?
+ |
+ | ldrh RAw, TRACE:CARG3->traceno
+ | ldrh RCw, TRACE:CARG3->link
+ | cmp RCw, RAw
+ | beq ->cont_nop // Blacklisted.
+ | cmp RCw, #0
+ | bne =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | mov CARG1, #GL_J(exitno)
+ | str RAw, [GL, CARG1]
+ | mov CARG1, #GL_J(L)
+ | str L, [GL, CARG1]
+ | str BASE, L->base
+ | add CARG1, GL, #GG_G2J
+ | mov CARG2, PC
+ | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ | ldr BASE, L->base
+ | b ->cont_nop
+ |
+ |9: // Fill up results with nil.
+ | str TISNIL, [BASE, RC, lsl #3]
+ | add RC, RC, #1
+ | b <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | ldr BASE, L->base
+ | sub PC, PC, #4
+ | b ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b
+ | stp d..a, d..b, [sp, #a*8]
+ | stp x..a, x..b, [sp, #32*8+a*8]
+ |.endmacro
+ |
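+ |// Spills the register state into a 64 slot ExitState on the stack:
+ |// d0-d31 in slots 0-31, x0-x29 in slots 32-61, then 0 and the
+ |// reconstructed sp in the lr/sp slots, before calling lj_trace_exit.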
+ |->vm_exit_handler:
+ |.if JIT
+ | sub sp, sp, #(64*8)
+ | savex_, 0, 1
+ | savex_, 2, 3
+ | savex_, 4, 5
+ | savex_, 6, 7
+ | savex_, 8, 9
+ | savex_, 10, 11
+ | savex_, 12, 13
+ | savex_, 14, 15
+ | savex_, 16, 17
+ | savex_, 18, 19
+ | savex_, 20, 21
+ | savex_, 22, 23
+ | savex_, 24, 25
+ | savex_, 26, 27
+ | savex_, 28, 29
+ | stp d30, d31, [sp, #30*8]
+ | ldr CARG1, [sp, #64*8] // Load original value of lr.
+ | add CARG3, sp, #64*8 // Recompute original value of sp.
+ | mv_vmstate CARG4w, EXIT
+ | stp xzr, CARG3, [sp, #62*8] // Store 0/sp in RID_LR/RID_SP.
+ | sub CARG1, CARG1, lr
+ | ldr L, GL->cur_L
+ | lsr CARG1, CARG1, #2
+ | ldr BASE, GL->jit_base
+ | sub CARG1, CARG1, #2
+ | ldr CARG2w, [lr] // Load trace number.
+ | st_vmstate CARG4w
+ |.if ENDIAN_BE
+ | rev32 CARG2, CARG2
+ |.endif
+ | str BASE, L->base
+ | ubfx CARG2w, CARG2w, #5, #16
+ | str CARG1w, [GL, #GL_J(exitno)]
+ | str CARG2w, [GL, #GL_J(parent)]
+ | str L, [GL, #GL_J(L)]
+ | str xzr, GL->jit_base
+ | add CARG1, GL, #GG_G2J
+ | mov CARG2, sp
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ldr CARG2, L->cframe
+ | ldr BASE, L->base
+ | and sp, CARG2, #CFRAME_RAWMASK
+ | ldr PC, SAVE_PC // Get SAVE_PC.
+ | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+ |.endif
+ |
+ |->vm_exit_interp:
+ | // CARG1 = MULTRES or negated error code, BASE, PC and GL set.
+ |.if JIT
+ | ldr L, SAVE_L
+ |1:
+ | init_constants
+ | cmn CARG1w, #LUA_ERRERR
+ | bhs >9 // Check for error from exit.
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsl RC, CARG1, #3
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | str RCw, SAVE_MULTRES
+ | str BASE, L->base
+ | ldr CARG2, LFUNC:CARG2->pc
+ | str xzr, GL->jit_base
+ | mv_vmstate CARG4w, INTERP
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | ldrb RBw, [PC, # OFS_OP]
+ | ldr INSw, [PC], #4
+ | st_vmstate CARG4w
+ | cmn CARG1w, #17 // Static dispatch?
+ | beq >5
+ | cmp RBw, #BC_FUNCC+2 // Fast function?
+ | add TMP1, GL, INS, uxtb #3
+ | bhs >4
+ |2:
+ | cmp RBw, #BC_FUNCF // Function header?
+ | add TMP0, GL, RB, uxtb #3
+ | ldr RB, [TMP0, #GG_G2DISP]
+ | decode_RA RA, INS
+ | lsr TMP0, INS, #16
+ | csel RC, TMP0, RC, lo
+ | blo >3
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | sub RC, RC, #8
+ | add RA, BASE, RA, lsl #3 // Yes: RA = BASE+framesize*8, RC = nargs*8
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ |3:
+ | br_auth RB
+ |
+ |4: // Check frame below fast function.
+ | ldr CARG1, [BASE, FRAME_PC]
+ | ands CARG2, CARG1, #FRAME_TYPE
+ | bne <2 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | ldr CARG3w, [CARG1, #-4]
+ | decode_RA CARG1, CARG3
+ | sub CARG2, BASE, CARG1, lsl #3
+ | ldr LFUNC:CARG3, [CARG2, #-32]
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ldr CARG3, LFUNC:CARG3->pc
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | b <2
+ |
+ |5: // Dispatch to static entry of original ins replaced by BC_JLOOP.
+ | ldr RA, [GL, #GL_J(trace)]
+ | decode_RD RC, INS
+ | ldr TRACE:RA, [RA, RC, lsl #3]
+ | ldr INSw, TRACE:RA->startins
+ | add TMP0, GL, INS, uxtb #3
+ | decode_RA RA, INS
+ | ldr RB, [TMP0, #GG_G2DISP+GG_DISP2STATIC]
+ | decode_RD RC, INS
+ | br_auth RB
+ |
+ |9: // Rethrow error from the right C frame.
+ | neg CARG2w, CARG1w
+ | mov CARG1, L
+ | bl extern lj_err_trace // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ | // int lj_vm_modi(int dividend, int divisor);
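+ |  // Lua's modulo is floored, i.e. the result takes the sign of the divisor.
+ |  // The code computes |a| % |b| via udiv/msub, subtracts |b| if the operand
+ |  // signs differ and the remainder is non-zero, and finally negates the
+ |  // result if its sign does not match the divisor's.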
+ |->vm_modi:
+ | eor CARG4w, CARG1w, CARG2w
+ | cmp CARG4w, #0
+ | eor CARG3w, CARG1w, CARG1w, asr #31
+ | eor CARG4w, CARG2w, CARG2w, asr #31
+ | sub CARG3w, CARG3w, CARG1w, asr #31
+ | sub CARG4w, CARG4w, CARG2w, asr #31
+ | udiv CARG1w, CARG3w, CARG4w
+ | msub CARG1w, CARG1w, CARG4w, CARG3w
+ | ccmp CARG1w, #0, #4, mi
+ | sub CARG3w, CARG1w, CARG4w
+ | csel CARG1w, CARG1w, CARG3w, eq
+ | eor CARG3w, CARG1w, CARG2w
+ | cmp CARG3w, #0
+ | cneg CARG1w, CARG1w, mi
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_RES, CARG1
+ |.define NEXT_IDX, CARG2w
+ |.define NEXT_LIM, CARG3w
+ |.define NEXT_TMP0, TMP0
+ |.define NEXT_TMP0w, TMP0w
+ |.define NEXT_TMP1, TMP1
+ |.define NEXT_TMP1w, TMP1w
+ |.define NEXT_RES_PTR, sp
+ |.define NEXT_RES_VAL, [sp]
+ |.define NEXT_RES_KEY, [sp, #8]
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in CRET2w.
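+ |// Scans the array part while idx < t->asize, skipping nil slots, then the
+ |// hash part (indexed by idx - asize). The key/value pair is written to the
+ |// two TValue slots at sp ([sp] = value, [sp, #8] = key) and a pointer to
+ |// them is returned. End of iteration is signalled by a nil key.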
+ |->vm_next:
+ |.if JIT
+ | ldr NEXT_LIM, NEXT_TAB->asize
+ | ldr NEXT_TMP1, NEXT_TAB->array
+ |1: // Traverse array part.
+ | subs NEXT_TMP0w, NEXT_IDX, NEXT_LIM
+ | bhs >5 // Index points after array part?
+ | ldr NEXT_TMP0, [NEXT_TMP1, NEXT_IDX, uxtw #3]
+ | cmn NEXT_TMP0, #-LJ_TNIL
+ | cinc NEXT_IDX, NEXT_IDX, eq
+ | beq <1 // Skip holes in array part.
+ | str NEXT_TMP0, NEXT_RES_VAL
+ | movz NEXT_TMP0w, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | stp NEXT_IDX, NEXT_TMP0w, NEXT_RES_KEY
+ | add NEXT_IDX, NEXT_IDX, #1
+ | mov NEXT_RES, NEXT_RES_PTR
+ |4:
+ | ret
+ |
+ |5: // Traverse hash part.
+ | ldr NEXT_TMP1w, NEXT_TAB->hmask
+ | ldr NODE:NEXT_RES, NEXT_TAB->node
+ | add NEXT_TMP0w, NEXT_TMP0w, NEXT_TMP0w, lsl #1
+ | add NEXT_LIM, NEXT_LIM, NEXT_TMP1w
+ | add NODE:NEXT_RES, NODE:NEXT_RES, NEXT_TMP0w, uxtw #3
+ |6:
+ | cmp NEXT_IDX, NEXT_LIM
+ | bhi >9
+ | ldr NEXT_TMP0, NODE:NEXT_RES->val
+ | cmn NEXT_TMP0, #-LJ_TNIL
+ | add NEXT_IDX, NEXT_IDX, #1
+ | bne <4
+ | // Skip holes in hash part.
+ | add NODE:NEXT_RES, NODE:NEXT_RES, #sizeof(Node)
+ | b <6
+ |
+ |9: // End of iteration. Set the key to nil (not the value).
+ | movn NEXT_TMP0, #0
+ | str NEXT_TMP0, NEXT_RES_KEY
+ | mov NEXT_RES, NEXT_RES_PTR
+ | ret
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions.
+ |// Registers are saved below via saveregs. Callback slot number in w9, g in x10.
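+ |// The incoming argument registers x0-x7/d0-d7 and the slot number are
+ |// spilled into cts->cb below, lj_ccallback_enter returns the lua_State to
+ |// run, and the callback body is then executed by the interpreter via
+ |// ins_callt.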
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | ldr CTSTATE, GL:x10->ctype_state
+ | mov GL, x10
+ | add x10, sp, # CFRAME_SPACE
+ | str w9, CTSTATE->cb.slot
+ | stp x0, x1, CTSTATE->cb.gpr[0]
+ | stp d0, d1, CTSTATE->cb.fpr[0]
+ | stp x2, x3, CTSTATE->cb.gpr[2]
+ | stp d2, d3, CTSTATE->cb.fpr[2]
+ | stp x4, x5, CTSTATE->cb.gpr[4]
+ | stp d4, d5, CTSTATE->cb.fpr[4]
+ | stp x6, x7, CTSTATE->cb.gpr[6]
+ | stp d6, d7, CTSTATE->cb.fpr[6]
+ | str x10, CTSTATE->cb.stack
+ | mov CARG1, CTSTATE
+ | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | mov CARG2, sp
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | ldp BASE, RC, L:CRET1->base
+ | init_constants
+ | mov L, CRET1
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub RC, RC, BASE
+ | st_vmstate ST_INTERP
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | ldr CTSTATE, GL->ctype_state
+ | stp BASE, CARG4, L->base
+ | str L, CTSTATE->L
+ | mov CARG1, CTSTATE
+ | mov CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | ldp x0, x1, CTSTATE->cb.gpr[0]
+ | ldp d0, d1, CTSTATE->cb.fpr[0]
+ | b ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
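+ | // After adjusting sp by CCSTATE->spadj, this copies the outgoing stack
+ | // arguments (CCSTATE->nsp bytes) from CCSTATE->stack, loads x0-x7/d0-d7
+ | // and the indirect result register x8 from CCSTATE, calls the target and
+ | // stores the returned x0/x1 and d0-d3 back into CCSTATE.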
+ |.if FFI
+ | .type CCSTATE, CCallState, x19
+ | sp_auth
+ | stp_unwind CCSTATE, x20, [sp, #-32]!
+ | stp fp, lr, [sp, #16]
+ | add fp, sp, #16
+ | mov CCSTATE, x0
+ | ldr TMP0w, CCSTATE:x0->spadj
+ | ldrb TMP1w, CCSTATE->nsp
+ | add TMP2, CCSTATE, #offsetof(CCallState, stack)
+ | subs TMP1, TMP1, #8
+ | ldr TMP3, CCSTATE->func
+ | sub sp, sp, TMP0
+ | bmi >2
+ |1: // Copy stack slots
+ | ldr TMP0, [TMP2, TMP1]
+ | str TMP0, [sp, TMP1]
+ | subs TMP1, TMP1, #8
+ | bpl <1
+ |2:
+ | ldp x0, x1, CCSTATE->gpr[0]
+ | ldp d0, d1, CCSTATE->fpr[0]
+ | ldp x2, x3, CCSTATE->gpr[2]
+ | ldp d2, d3, CCSTATE->fpr[2]
+ | ldp x4, x5, CCSTATE->gpr[4]
+ | ldp d4, d5, CCSTATE->fpr[4]
+ | ldp x6, x7, CCSTATE->gpr[6]
+ | ldp d6, d7, CCSTATE->fpr[6]
+ | ldr x8, CCSTATE->retp
+ | blr_auth TMP3
+ | sub sp, fp, #16
+ | stp x0, x1, CCSTATE->gpr[0]
+ | stp d0, d1, CCSTATE->fpr[0]
+ | stp d2, d3, CCSTATE->fpr[2]
+ | ldp fp, lr, [sp, #16]
+ | ldp_unwind CCSTATE, x20, [sp], #32
+ | ret_auth
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1, RC = src2, JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG2, [BASE, RC, lsl #3]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
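+ |  // Jump operands are biased by BCBIAS_J (0x8000): target = PC + (RD - 0x8000)*4,
+ |  // hence the 0x20000 (= 0x8000*4) correction after scaling.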
+ | checkint CARG1, >3
+ | checkint CARG2, >4
+ | cmp CARG1w, CARG2w
+ if (op == BC_ISLT) {
+ | csel PC, RB, PC, lt
+ } else if (op == BC_ISGE) {
+ | csel PC, RB, PC, ge
+ } else if (op == BC_ISLE) {
+ | csel PC, RB, PC, le
+ } else {
+ | csel PC, RB, PC, gt
+ }
+ |1:
+ | ins_next
+ |
+ |3: // RA not int.
+ | ldr FARG1, [BASE, RA, lsl #3]
+ | blo ->vmeta_comp
+ | ldr FARG2, [BASE, RC, lsl #3]
+ | cmp TISNUMhi, CARG2, lsr #32
+ | bhi >5
+ | bne ->vmeta_comp
+ | // RA number, RC int.
+ | scvtf FARG2, CARG2w
+ | b >5
+ |
+ |4: // RA int, RC not int
+ | ldr FARG2, [BASE, RC, lsl #3]
+ | blo ->vmeta_comp
+ | // RA int, RC number.
+ | scvtf FARG1, CARG1w
+ |
+ |5: // RA number, RC number
+ | fcmp FARG1, FARG2
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | csel PC, RB, PC, lo
+ } else if (op == BC_ISGE) {
+ | csel PC, RB, PC, hs
+ } else if (op == BC_ISLE) {
+ | csel PC, RB, PC, ls
+ } else {
+ | csel PC, RB, PC, hi
+ }
+ | b <1
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1, RC = src2, JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | add RC, BASE, RC, lsl #3
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG3, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
+ | asr ITYPE, CARG3, #47
+ | cmn ITYPE, #-LJ_TISNUM
+ if (vk) {
+ | bls ->BC_ISEQN_Z
+ } else {
+ | bls ->BC_ISNEN_Z
+ }
+ | // RC is not a number.
+ | asr TMP0, CARG1, #47
+ |.if FFI
+ | // Check if RC or RA is a cdata.
+ | cmn ITYPE, #-LJ_TCDATA
+ | ccmn TMP0, #-LJ_TCDATA, #4, ne
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG1, CARG3
+ | bne >2
+ | // Tag and value are equal.
+ if (vk) {
+ |->BC_ISEQV_Z:
+ | mov PC, RB // Perform branch.
+ }
+ |1:
+ | ins_next
+ |
+ |2: // Check if the tags are the same and it's a table or userdata.
+ | cmp ITYPE, TMP0
+ | ccmn ITYPE, #-LJ_TISTABUD, #2, eq
+ if (vk) {
+ | bhi <1
+ } else {
+ | bhi ->BC_ISEQV_Z // Reuse code from opposite instruction.
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | and TAB:CARG2, CARG1, #LJ_GCVMASK
+ | ldr TAB:TMP2, TAB:CARG2->metatable
+ if (vk) {
+ | cbz TAB:TMP2, <1 // No metatable?
+ | ldrb TMP1w, TAB:TMP2->nomm
+ | mov CARG4, #0 // ne = 0
+ | tbnz TMP1w, #MM_eq, <1 // 'no __eq' flag set: done.
+ } else {
+ | cbz TAB:TMP2, ->BC_ISEQV_Z // No metatable?
+ | ldrb TMP1w, TAB:TMP2->nomm
+ | mov CARG4, #1 // ne = 1.
+ | tbnz TMP1w, #MM_eq, ->BC_ISEQV_Z // 'no __eq' flag set: done.
+ }
+ | b ->vmeta_equal
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src, RC = str_const (~), JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | mvn RC, RC
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG2, [KBASE, RC, lsl #3]
+ | add PC, PC, #4
+ | movn TMP0, #~LJ_TSTR
+ |.if FFI
+ | asr ITYPE, CARG1, #47
+ |.endif
+ | add RB, PC, RB, lsl #2
+ | add CARG2, CARG2, TMP0, lsl #47
+ | sub RB, RB, #0x20000
+ |.if FFI
+ | cmn ITYPE, #-LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG1, CARG2
+ if (vk) {
+ | csel PC, RB, PC, eq
+ } else {
+ | csel PC, RB, PC, ne
+ }
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src, RC = num_const (~), JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | add RC, KBASE, RC, lsl #3
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG3, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | checkint CARG1, >4
+ | checkint CARG3, >6
+ | cmp CARG1w, CARG3w
+ |1:
+ if (vk) {
+ | csel PC, RB, PC, eq
+ |2:
+ } else {
+ |2:
+ | csel PC, RB, PC, ne
+ }
+ |3:
+ | ins_next
+ |
+ |4: // RA not int.
+ |.if FFI
+ | blo >7
+ |.else
+ | blo <2
+ |.endif
+ | ldr FARG1, [BASE, RA, lsl #3]
+ | ldr FARG2, [RC]
+ | cmp TISNUMhi, CARG3, lsr #32
+ | bne >5
+ | // RA number, RC int.
+ | scvtf FARG2, CARG3w
+ |5:
+ | // RA number, RC number.
+ | fcmp FARG1, FARG2
+ | b <1
+ |
+ |6: // RA int, RC number
+ | ldr FARG2, [RC]
+ | scvtf FARG1, CARG1w
+ | fcmp FARG1, FARG2
+ | b <1
+ |
+ |.if FFI
+ |7:
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TCDATA
+ | bne <2
+ | b ->vmeta_equal_cd
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src, RC = primitive_type (~), JMP with RC = target
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | ldrh RBw, [PC, # OFS_RD]
+ | add PC, PC, #4
+ | add RC, RC, #1
+ | add RB, PC, RB, lsl #2
+ |.if FFI
+ | asr ITYPE, TMP0, #47
+ | cmn ITYPE, #-LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ | cmn RC, ITYPE
+ |.else
+ | cmn RC, TMP0, asr #47
+ |.endif
+ | sub RB, RB, #0x20000
+ if (vk) {
+ | csel PC, RB, PC, eq
+ } else {
+ | csel PC, RB, PC, ne
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst or unused, RC = src, JMP with RC = target
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | add PC, PC, #4
+ | mov_false TMP1
+ | add RB, PC, RB, lsl #2
+ | cmp TMP0, TMP1
+ | sub RB, RB, #0x20000
+ if (op == BC_ISTC || op == BC_IST) {
+ if (op == BC_ISTC) {
+ | csel RA, RA, RC, lo
+ }
+ | csel PC, RB, PC, lo
+ } else {
+ if (op == BC_ISFC) {
+ | csel RA, RA, RC, hs
+ }
+ | csel PC, RB, PC, hs
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | str TMP0, [BASE, RA, lsl #3]
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src, RC = -type
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | cmn RC, TMP0, asr #47
+ | bne ->vmeta_istype
+ | ins_next
+ break;
+ case BC_ISNUM:
+ | // RA = src, RC = -(TISNUM-1)
+ |  ldr TMP0, [BASE, RA, lsl #3]
+ | checknum TMP0, ->vmeta_istype
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst, RC = src
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_NOT:
+ | // RA = dst, RC = src
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | mov_false TMP1
+ | mov_true TMP2
+ | cmp TMP0, TMP1
+ | csel TMP0, TMP1, TMP2, lo
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_UNM:
+ | // RA = dst, RC = src
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | asr ITYPE, TMP0, #47
+ | cmn ITYPE, #-LJ_TISNUM
+ | bhi ->vmeta_unm
+ | eor TMP0, TMP0, #U64x(80000000,00000000)
+ | bne >5
+ | negs TMP0w, TMP0w
+ | movz CARG3, #0x41e0, lsl #48 // 2^31.
+ | add_TISNUM TMP0, TMP0
+ | csel TMP0, TMP0, CARG3, vc
+ |5:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_LEN:
+ | // RA = dst, RC = src
+ | ldr CARG1, [BASE, RC, lsl #3]
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TSTR
+ | and CARG1, CARG1, #LJ_GCVMASK
+ | bne >2
+ | ldr CARG1w, STR:CARG1->len
+ |1:
+ | add_TISNUM CARG1, CARG1
+ | str CARG1, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |2:
+ | cmn ITYPE, #-LJ_TTAB
+ | bne ->vmeta_len
+#if LJ_52
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cbnz TAB:CARG2, >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |
+#if LJ_52
+ |9:
+ | ldrb TMP1w, TAB:CARG2->nomm
+ | tbnz TMP1w, #MM_len, <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithcheck_int, target
+ | checkint CARG1, target
+ | checkint CARG2, target
+ |.endmacro
+ |
+ |.macro ins_arithcheck_num, target
+ | checknum CARG1, target
+ | checknum CARG2, target
+ |.endmacro
+ |
+ |.macro ins_arithcheck_nzdiv, target
+ | cbz CARG2w, target
+ |.endmacro
+ |
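+ |// Arithmetic ops come in three operand variants: VN (the second operand is
+ |// the number constant KBASE[RC]), NV (the constant is the first operand)
+ |// and VV (both operands are stack slots). vk selects among them below:
+ |// 0 = VN, 1 = NV, otherwise VV.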
+ |.macro ins_arithhead
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||if (vk == 1) {
+ | and RC, RC, #255
+ | decode_RB RB, INS
+ ||} else {
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithload, reg1, reg2
+ | // RA = dst, RB = src1, RC = src2 | num_const
+ ||switch (vk) {
+ ||case 0:
+ | ldr reg1, [BASE, RB, lsl #3]
+ | ldr reg2, [KBASE, RC, lsl #3]
+ || break;
+ ||case 1:
+ | ldr reg1, [KBASE, RC, lsl #3]
+ | ldr reg2, [BASE, RB, lsl #3]
+ || break;
+ ||default:
+ | ldr reg1, [BASE, RB, lsl #3]
+ | ldr reg2, [BASE, RC, lsl #3]
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithmod, res, reg1, reg2
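+ |  // Floored modulo: res = reg1 - floor(reg1/reg2)*reg2.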
+ | fdiv d2, reg1, reg2
+ | frintm d2, d2
+ | // Cannot use fmsub, because FMA is not enabled by default.
+ | fmul d2, d2, reg2
+ | fsub res, reg1, d2
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpins
+ | ins_arithhead
+ | ins_arithload CARG1, CARG2
+ | ins_arithcheck_int >5
+ |.if "intins" == "smull"
+ | smull CARG1, CARG1w, CARG2w
+ | cmp CARG1, CARG1, sxtw
+ | mov CARG1w, CARG1w
+ | ins_arithfallback bne
+ |.elif "intins" == "ins_arithmodi"
+ | ins_arithfallback ins_arithcheck_nzdiv
+ | bl ->vm_modi
+ |.else
+ | intins CARG1w, CARG1w, CARG2w
+ | ins_arithfallback bvs
+ |.endif
+ | add_TISNUM CARG1, CARG1
+ | str CARG1, [BASE, RA, lsl #3]
+ |4:
+ | ins_next
+ |
+ |5: // FP variant.
+ | ins_arithload FARG1, FARG2
+ | ins_arithfallback ins_arithcheck_num
+ | fpins FARG1, FARG1, FARG2
+ | str FARG1, [BASE, RA, lsl #3]
+ | b <4
+ |.endmacro
+ |
+ |.macro ins_arithfp, fpins
+ | ins_arithhead
+ | ins_arithload CARG1, CARG2
+ | ins_arithload FARG1, FARG2
+ | ins_arithfallback ins_arithcheck_num
+ |.if "fpins" == "fpow"
+ | bl extern pow
+ |.else
+ | fpins FARG1, FARG1, FARG2
+ |.endif
+ | str FARG1, [BASE, RA, lsl #3]
+ | ins_next
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arithdn adds, fadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arithdn subs, fsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arithdn smull, fmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp fdiv
+ break;
+ case BC_MODVN: case BC_MODNV: case BC_MODVV:
+ | ins_arithdn ins_arithmodi, ins_arithmod
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | ins_arithfp fpow
+ break;
+
+ case BC_CAT:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = src_start, RC = src_end
+ | str BASE, L->base
+ | sub CARG3, RC, RB
+ | add CARG2, BASE, RC, lsl #3
+ |->BC_CAT_Z:
+ | // RA = dst, CARG2 = top-1, CARG3 = left
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | ldrb RBw, [PC, #-4+OFS_RB]
+ | ldr BASE, L->base
+ | cbnz CRET1, ->vmeta_binop
+ | ldr TMP0, [BASE, RB, lsl #3]
+ | str TMP0, [BASE, RA, lsl #3] // Copy result to RA.
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst, RC = str_const (~)
+ | mvn RC, RC
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | movn TMP1, #~LJ_TSTR
+ | add TMP0, TMP0, TMP1, lsl #47
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst, RC = cdata_const (~)
+ | mvn RC, RC
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | movn TMP1, #~LJ_TCDATA
+ | add TMP0, TMP0, TMP1, lsl #47
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst, RC = int16_literal
+ | sxth RCw, RCw
+ | add_TISNUM TMP0, RC
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KNUM:
+ | // RA = dst, RC = num_const
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KPRI:
+ | // RA = dst, RC = primitive_type (~)
+ | mvn TMP0, RC, lsl #47
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KNIL:
+ | // RA = base, RC = end
+ | add RA, BASE, RA, lsl #3
+ | add RC, BASE, RC, lsl #3
+ | str TISNIL, [RA], #8
+ |1:
+ | cmp RA, RC
+ | str TISNIL, [RA], #8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst, RC = uvnum
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RC, RC, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RC, lsl #3]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldr TMP0, [CARG2]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_USETV:
+ | // RA = uvnum, RC = src
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3]
+ | ldr CARG3, [BASE, RC, lsl #3]
+ | ldr CARG2, UPVAL:CARG1->v
+ | ldrb TMP2w, UPVAL:CARG1->marked
+ | ldrb TMP0w, UPVAL:CARG1->closed
+ | asr ITYPE, CARG3, #47
+ | str CARG3, [CARG2]
+ | add ITYPE, ITYPE, #-LJ_TISGCV
+ | tst TMP2w, #LJ_GC_BLACK // isblack(uv)
+ | ccmp TMP0w, #0, #4, ne // && uv->closed
+ | ccmn ITYPE, #-(LJ_TNUMX - LJ_TISGCV), #0, ne // && tvisgcv(v)
+ | bhi >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is white.
+ | and GCOBJ:CARG3, CARG3, #LJ_GCVMASK
+ | ldrb TMP1w, GCOBJ:CARG3->gch.marked
+ | tst TMP1w, #LJ_GC_WHITES // iswhite(str)
+ | beq <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov CARG1, GL
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | mvn RC, RC
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3]
+ | ldr STR:CARG3, [KBASE, RC, lsl #3]
+ | movn TMP0, #~LJ_TSTR
+ | ldr CARG2, UPVAL:CARG1->v
+ | ldrb TMP2w, UPVAL:CARG1->marked
+ | add TMP0, STR:CARG3, TMP0, lsl #47
+ | ldrb TMP1w, STR:CARG3->marked
+ | str TMP0, [CARG2]
+ | tbnz TMP2w, #2, >2 // isblack(uv)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | ldrb TMP0w, UPVAL:CARG1->closed
+ | tst TMP1w, #LJ_GC_WHITES // iswhite(str)
+ | ccmp TMP0w, #0, #4, ne
+ | beq <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov CARG1, GL
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum, RC = num_const
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3]
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | ldr CARG2, UPVAL:CARG2->v
+ | str TMP0, [CARG2]
+ | ins_next
+ break;
+ case BC_USETP:
+ | // RA = uvnum, RC = primitive_type (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3]
+ | mvn TMP0, RC, lsl #47
+ | ldr CARG2, UPVAL:CARG2->v
+ | str TMP0, [CARG2]
+ | ins_next
+ break;
+
+ case BC_UCLO:
+ | // RA = level, RC = target
+ | ldr CARG3, L->openupval
+ | add RC, PC, RC, lsl #2
+ | str BASE, L->base
+ | sub PC, RC, #0x20000
+ | cbz CARG3, >1
+ | mov CARG1, L
+ | add CARG2, BASE, RA, lsl #3
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | ldr BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst, RC = proto_const (~) (holding function prototype)
+ | mvn RC, RC
+ | str BASE, L->base
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | str PC, SAVE_PC
+ | ldr CARG2, [KBASE, RC, lsl #3]
+ | mov CARG1, L
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | ldr BASE, L->base
+ | movn TMP0, #~LJ_TFUNC
+ | add CRET1, CRET1, TMP0, lsl #47
+ | str CRET1, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst, RC = (hbits|asize) | tab_const (~)
+ | ldp CARG3, CARG4, GL->gc.total // Assumes threshold follows total.
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | cmp CARG3, CARG4
+ | bhs >5
+ |1:
+ if (op == BC_TNEW) {
+ | and CARG2, RC, #0x7ff
+ | lsr CARG3, RC, #11
+ | cmp CARG2, #0x7ff
+ | mov TMP0, #0x801
+ | csel CARG2, CARG2, TMP0, ne
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns GCtab *.
+ } else {
+ | mvn RC, RC
+ | ldr CARG2, [KBASE, RC, lsl #3]
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns GCtab *.
+ }
+ | ldr BASE, L->base
+ | movk CRET1, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | str CRET1, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |5:
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mov CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst, RC = str_const (~)
+ case BC_GSET:
+ | // RA = src, RC = str_const (~)
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ | mvn RC, RC
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | ldr TAB:CARG2, LFUNC:CARG1->env
+ | ldr STR:RC, [KBASE, RC, lsl #3]
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = key
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tgetv
+ | checkint TMP1, >9 // Integer key?
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, TMP1, uxtw #3
+ | cmp TMP1w, CARG1w // In array part?
+ | bhs ->vmeta_tgetv
+ | ldr TMP0, [CARG3]
+ | cmp_nil TMP0
+ | beq >5
+ |1:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |9:
+ | asr ITYPE, TMP1, #47
+ | cmn ITYPE, #-LJ_TSTR // String key?
+ | bne ->vmeta_tgetv
+ | and STR:RC, TMP1, #LJ_GCVMASK
+ | b ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = str_const (~)
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = dst
+ | ldr TMP1w, TAB:CARG2->hmask
+ | ldr TMP2w, STR:RC->sid
+ | ldr NODE:CARG3, TAB:CARG2->node
+ | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
+ | add TMP1, TMP1, TMP1, lsl #1
+ | movn CARG4, #~LJ_TSTR
+ | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
+ | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
+ |1:
+ | ldp TMP0, CARG1, NODE:CARG3->val
+ | ldr NODE:CARG3, NODE:CARG3->next
+ | cmp CARG1, CARG4
+ | bne >4
+ | cmp_nil TMP0
+ | beq >5
+ |3:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | cbnz NODE:CARG3, <1
+ | // End of hash chain: key not found, nil result.
+ | mov_nil TMP0
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <3 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_index, <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = index
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | checktab CARG2, ->vmeta_tgetb
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, RC, lsl #3
+ | cmp RCw, CARG1w // In array part?
+ | bhs ->vmeta_tgetb
+ | ldr TMP0, [CARG3]
+ | cmp_nil TMP0
+ | beq >5
+ |1:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb
+ break;
+ case BC_TGETR:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = key
+ | ldr CARG1, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | and TAB:CARG1, CARG1, #LJ_GCVMASK
+ | ldr CARG3, TAB:CARG1->array
+ | ldr TMP2w, TAB:CARG1->asize
+ | add CARG3, CARG3, TMP1w, uxtw #3
+ | cmp TMP1w, TMP2w // In array part?
+ | bhs ->vmeta_tgetr
+ | ldr TMP0, [CARG3]
+ |->BC_TGETR_Z:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = src, RB = table, RC = key
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tsetv
+ | checkint TMP1, >9 // Integer key?
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, TMP1, uxtw #3
+ | cmp TMP1w, CARG1w // In array part?
+ | bhs ->vmeta_tsetv
+ | ldr TMP1, [CARG3]
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | ldrb TMP2w, TAB:CARG2->marked
+ | cmp_nil TMP1 // Previous value is nil?
+ | beq >5
+ |1:
+ | str TMP0, [CARG3]
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <2
+ |
+ |9:
+ | asr ITYPE, TMP1, #47
+ | cmn ITYPE, #-LJ_TSTR // String key?
+ | bne ->vmeta_tsetv
+ | and STR:RC, TMP1, #LJ_GCVMASK
+ | b ->BC_TSETS_Z
+ break;
+ case BC_TSETS:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ |  // RA = src, RB = table, RC = str_const (~)
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = src
+ | ldr TMP1w, TAB:CARG2->hmask
+ | ldr TMP2w, STR:RC->sid
+ | ldr NODE:CARG3, TAB:CARG2->node
+ | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
+ | add TMP1, TMP1, TMP1, lsl #1
+ | movn CARG4, #~LJ_TSTR
+ | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
+ | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
+ | strb wzr, TAB:CARG2->nomm // Clear metamethod cache.
+ |1:
+ | ldp TMP1, CARG1, NODE:CARG3->val
+ | ldr NODE:TMP3, NODE:CARG3->next
+ | ldrb TMP2w, TAB:CARG2->marked
+ | cmp CARG1, CARG4
+ | bne >5
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | cmp_nil TMP1 // Previous value is nil?
+ | beq >4
+ |2:
+ | str TMP0, NODE:CARG3->val
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <2 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_newindex, <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | mov NODE:CARG3, NODE:TMP3
+ | cbnz NODE:TMP3, <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, >6 // No metatable: continue.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | // 'no __newindex' flag NOT set: check.
+ | tbz TMP1w, #MM_newindex, ->vmeta_tsets
+ |6:
+ | movn TMP1, #~LJ_TSTR
+ | str PC, SAVE_PC
+ | add TMP0, STR:RC, TMP1, lsl #47
+ | str BASE, L->base
+ | mov CARG1, L
+ | str TMP0, TMPD
+ | add CARG3, sp, TMPDofs
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | ldr BASE, L->base
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | str TMP0, [CRET1]
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <3
+ break;
+ case BC_TSETB:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = src, RB = table, RC = index
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | checktab CARG2, ->vmeta_tsetb
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, RC, lsl #3
+ | cmp RCw, CARG1w // In array part?
+ | bhs ->vmeta_tsetb
+ | ldr TMP1, [CARG3]
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | ldrb TMP2w, TAB:CARG2->marked
+ | cmp_nil TMP1 // Previous value is nil?
+ | beq >5
+ |1:
+ | str TMP0, [CARG3]
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <2
+ break;
+ case BC_TSETR:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = src, RB = table, RC = key
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | and TAB:CARG2, CARG2, #LJ_GCVMASK
+ | ldr CARG1, TAB:CARG2->array
+ | ldrb TMP2w, TAB:CARG2->marked
+ | ldr CARG4w, TAB:CARG2->asize
+ | add CARG1, CARG1, TMP1, uxtw #3
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |2:
+ | cmp TMP1w, CARG4w // In array part?
+ | bhs ->vmeta_tsetr
+ |->BC_TSETR_Z:
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | str TMP0, [CARG1]
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP0
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base (table at base-1), RC = num_const (start index)
+ | add RA, BASE, RA, lsl #3
+ |1:
+ | ldr RBw, SAVE_MULTRES
+ | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
+ | ldr TMP1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
+ | sub RB, RB, #8
+ | cbz RB, >4 // Nothing to copy?
+ | and TAB:CARG2, CARG2, #LJ_GCVMASK
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3w, TMP1w, RBw, lsr #3
+ | ldr CARG4, TAB:CARG2->array
+ | cmp CARG3, CARG1
+ | add RB, RA, RB
+ | bhi >5
+ | add TMP1, CARG4, TMP1w, uxtw #3
+ | ldrb TMP2w, TAB:CARG2->marked
+ |3: // Copy result slots to table.
+ | ldr TMP0, [RA], #8
+ | str TMP0, [TMP1], #8
+ | cmp RA, RB
+ | blo <3
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base, (RB = nresults+1,) RC = extra_nargs
+ | ldr TMP0w, SAVE_MULTRES
+ | decode_RC8RD NARGS8:RC, RC
+ | add NARGS8:RC, NARGS8:RC, TMP0
+ | b ->BC_CALL_Z
+ break;
+ case BC_CALL:
+ | decode_RC8RD NARGS8:RC, RC
+ | // RA = base, (RB = nresults+1,) RC = (nargs+1)*8
+ |->BC_CALL_Z:
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | add BASE, BASE, RA, lsl #3
+ | ldr CARG3, [BASE], #16
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | checkfunc CARG3, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base, (RB = 0,) RC = extra_nargs
+ | ldr TMP0w, SAVE_MULTRES
+ | add NARGS8:RC, TMP0, RC, lsl #3
+ | b ->BC_CALLT1_Z
+ break;
+ case BC_CALLT:
+ | lsl NARGS8:RC, RC, #3
+ | // RA = base, (RB = 0,) RC = (nargs+1)*8
+ |->BC_CALLT1_Z:
+ | add RA, BASE, RA, lsl #3
+ | ldr TMP1, [RA], #16
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | checktp CARG3, TMP1, LJ_TFUNC, ->vmeta_callt
+ | ldr PC, [BASE, FRAME_PC]
+ |->BC_CALLT2_Z:
+ | mov RB, #0
+ | ldrb TMP2w, LFUNC:CARG3->ffid
+ | tst PC, #FRAME_TYPE
+ | bne >7
+ |1:
+ | str TMP1, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
+ | cbz NARGS8:RC, >3
+ |2:
+ | ldr TMP0, [RA, RB]
+ | add TMP1, RB, #8
+ | cmp TMP1, NARGS8:RC
+ | str TMP0, [BASE, RB]
+ | mov RB, TMP1
+ | bne <2
+ |3:
+ | cmp TMP2, #1 // (> FF_C) Calling a fast function?
+ | bhi >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | ldrb RAw, [PC, #-4+OFS_RA]
+ | sub CARG1, BASE, RA, lsl #3
+ | ldr LFUNC:CARG1, [CARG1, #-32]
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | ldr CARG1, LFUNC:CARG1->pc
+ | ldr KBASE, [CARG1, #PC2PROTO(k)]
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | eor PC, PC, #FRAME_VARG
+ | tst PC, #FRAME_TYPEP // Vararg frame below?
+ | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below.
+ | bne <1
+ | sub BASE, BASE, PC
+ | ldr PC, [BASE, FRAME_PC]
+ | tst PC, #FRAME_TYPE
+ | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below.
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA, lsl #3
+ | ldr CARG3, [RA, #-24]
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldp CARG1, CARG2, [RA, #-16]
+ | add BASE, RA, #16
+ | mov NARGS8:RC, #16 // Iterators get 2 arguments.
+ | str CARG3, [RA] // Copy callable.
+ | stp CARG1, CARG2, [RA, #16] // Copy state and control var.
+ | checkfunc CARG3, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT
+ | hotloop
+ |.endif
+ |->vm_IITERN:
+ | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA, lsl #3
+ | ldr TAB:RB, [RA, #-16]
+ | ldrh TMP3w, [PC, # OFS_RD]
+ | ldr CARG1w, [RA, #-8+LO] // Get index from control var.
+ | add PC, PC, #4
+ | add TMP3, PC, TMP3, lsl #2
+ | and TAB:RB, RB, #LJ_GCVMASK
+ | sub TMP3, TMP3, #0x20000
+ | ldr TMP1w, TAB:RB->asize
+ | ldr CARG2, TAB:RB->array
+ |1: // Traverse array part.
+ | subs RC, CARG1, TMP1
+ | add CARG3, CARG2, CARG1, lsl #3
+ | bhs >5 // Index points after array part?
+ | ldr TMP0, [CARG3]
+ | cmp_nil TMP0
+ | cinc CARG1, CARG1, eq // Skip holes in array part.
+ | beq <1
+ | add_TISNUM CARG1, CARG1
+ | stp CARG1, TMP0, [RA]
+ | add CARG1, CARG1, #1
+ |3:
+ | str CARG1w, [RA, #-8+LO] // Update control var.
+ | mov PC, TMP3
+ |4:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | ldr TMP2w, TAB:RB->hmask
+ | ldr NODE:RB, TAB:RB->node
+ |6:
+ | add CARG1, RC, RC, lsl #1
+ | cmp RC, TMP2 // End of iteration? Branch to ITERN+1.
+ | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
+ | bhi <4
+ | ldp TMP0, CARG1, NODE:CARG3->val
+ | cmp_nil TMP0
+ | add RC, RC, #1
+ | beq <6 // Skip holes in hash part.
+ | stp CARG1, TMP0, [RA]
+ | add CARG1, RC, TMP1
+ | b <3
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base, RC = target (points to ITERN)
+ | add RA, BASE, RA, lsl #3
+ | ldr CFUNC:CARG1, [RA, #-24]
+ | add RC, PC, RC, lsl #2
+ | ldp TAB:CARG3, CARG4, [RA, #-16]
+ | sub RC, RC, #0x20000
+ | checkfunc CFUNC:CARG1, >5
+ | asr TMP0, TAB:CARG3, #47
+ | ldrb TMP1w, CFUNC:CARG1->ffid
+ | cmp_nil CARG4
+ | ccmn TMP0, #-LJ_TTAB, #0, eq
+ | ccmp TMP1w, #FF_next_N, #0, eq
+ | bne >5
+ | mov TMP0w, #0xfffe7fff // LJ_KEYINDEX
+ | lsl TMP0, TMP0, #32
+ | str TMP0, [RA, #-8] // Initialize control var.
+ |1:
+ | mov PC, RC
+ | ins_next
+ |
+ |5: // Despecialize bytecode if any of the checks fail.
+ |.if JIT
+ | ldrb TMP2w, [RC, # OFS_OP]
+ |.endif
+ | mov TMP0, #BC_JMP
+ | mov TMP1, #BC_ITERC
+ | strb TMP0w, [PC, #-4+OFS_OP]
+ |.if JIT
+ | cmp TMP2w, #BC_ITERN
+ | bne >6
+ |.endif
+ | strb TMP1w, [RC, # OFS_OP]
+ | b <1
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | ldr RA, [GL, #GL_J(trace)]
+ | ldrh TMP2w, [RC, # OFS_RD]
+ | ldr TRACE:RA, [RA, TMP2, lsl #3]
+ | ldr TMP2w, TRACE:RA->startins
+ | bfxil TMP2w, TMP1w, #0, #8
+ | str TMP2w, [RC]
+ | b <1
+ |.endif
+ break;
+
+ case BC_VARG:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = base, RB = (nresults+1), RC = numparams
+ | ldr TMP1, [BASE, FRAME_PC]
+ | add TMP0, BASE, RC, lsl #3
+ | add RC, BASE, RA, lsl #3 // RC = destination
+ | add TMP0, TMP0, #FRAME_VARG
+ | add TMP2, RC, RB, lsl #3
+ | sub RA, TMP0, TMP1 // RA = vbase
+ | // Note: RA may now be even _above_ BASE if nargs was < numparams.
+ | sub TMP3, BASE, #16 // TMP3 = vtop
+ | cbz RB, >5
+ | sub TMP2, TMP2, #16
+ |1: // Copy vararg slots to destination slots.
+ | cmp RA, TMP3
+ | ldr TMP0, [RA], #8
+ | csinv TMP0, TMP0, xzr, lo // TISNIL = ~xzr
+ | cmp RC, TMP2
+ | str TMP0, [RC], #8
+ | blo <1
+ |2:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ldr TMP0, L->maxstack
+ | subs TMP2, TMP3, RA
+ | csel RB, xzr, TMP2, le // MULTRES = (max(vtop-vbase,0)+1)*8
+ | add RB, RB, #8
+ | add TMP1, RC, TMP2
+ | str RBw, SAVE_MULTRES
+ | ble <2 // Nothing to copy.
+ | cmp TMP1, TMP0
+ | bhi >7
+ |6:
+ | ldr TMP0, [RA], #8
+ | str TMP0, [RC], #8
+ | cmp RA, TMP3
+ | blo <6
+ | b <2
+ |
+ |7: // Grow stack for varargs.
+ | lsr CARG2, TMP2, #3
+ | stp BASE, RC, L->base
+ | mov CARG1, L
+ | sub RA, RA, BASE // Need delta, because BASE may change.
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldp BASE, RC, L->base
+ | add RA, BASE, RA
+ | sub TMP3, BASE, #16
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results, RC = extra results
+ | ldr TMP0w, SAVE_MULTRES
+ | ldr PC, [BASE, FRAME_PC]
+ | add RA, BASE, RA, lsl #3
+ | add RC, TMP0, RC, lsl #3
+ | b ->BC_RETM_Z
+ break;
+
+ case BC_RET:
+ | // RA = results, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | add RA, BASE, RA, lsl #3
+ |->BC_RETM_Z:
+ | str RCw, SAVE_MULTRES
+ |1:
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV2_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
+ | ldr INSw, [PC, #-4]
+ | subs TMP1, RC, #8
+ | sub CARG3, BASE, #16
+ | beq >3
+ |2:
+ | ldr TMP0, [RA], #8
+ | add BASE, BASE, #8
+ | sub TMP1, TMP1, #8
+ | str TMP0, [BASE, #-24]
+ | cbnz TMP1, <2
+ |3:
+ | decode_RA RA, INS
+ | sub CARG4, CARG3, RA, lsl #3
+ | decode_RB RB, INS
+ | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
+ |5:
+ | cmp RC, RB, lsl #3 // More results expected?
+ | blo >6
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | mov BASE, CARG4
+ | ldr CARG2, LFUNC:CARG1->pc
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | add BASE, BASE, #8
+ | add RC, RC, #8
+ | str TISNIL, [BASE, #-24]
+ | b <5
+ |
+ |->BC_RETV1_Z: // Non-standard return case.
+ | add RA, BASE, RA, lsl #3
+ |->BC_RETV2_Z:
+ | tst CARG2, #FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, CARG2
+ | ldr PC, [BASE, FRAME_PC]
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | str RCw, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV1_Z
+ | ldr INSw, [PC, #-4]
+ if (op == BC_RET1) {
+ | ldr TMP0, [BASE, RA, lsl #3]
+ }
+ | sub CARG4, BASE, #16
+ | decode_RA RA, INS
+ | sub BASE, CARG4, RA, lsl #3
+ if (op == BC_RET1) {
+ | str TMP0, [CARG4], #8
+ }
+ | decode_RB RB, INS
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ |5:
+ | cmp RC, RB, lsl #3
+ | blo >6
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | ldr CARG2, LFUNC:CARG1->pc
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | add RC, RC, #8
+ | str TISNIL, [CARG4], #8
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
+ |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
+ |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
+ |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
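+ |// A numeric for loop keeps its state in four consecutive slots at RA:
+ |// internal index, stop value, step, and the copy of the index visible to
+ |// the loop body (EXT). The FOR_T* names refer to the upper halves.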
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base, RC = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | add RA, BASE, RA, lsl #3
+ | ldp CARG1, CARG2, FOR_IDX // CARG1 = IDX, CARG2 = STOP
+ | ldr CARG3, FOR_STEP // CARG3 = STEP
+ if (op != BC_JFORL) {
+ | add RC, PC, RC, lsl #2
+ | sub RC, RC, #0x20000
+ }
+ | checkint CARG1, >5
+ if (!vk) {
+ | checkint CARG2, ->vmeta_for
+ | checkint CARG3, ->vmeta_for
+ | tbnz CARG3w, #31, >4
+ | cmp CARG1w, CARG2w
+ } else {
+ | adds CARG1w, CARG1w, CARG3w
+ | bvs >2
+ | add_TISNUM TMP0, CARG1
+ | tbnz CARG3w, #31, >4
+ | cmp CARG1w, CARG2w
+ }
+ |1:
+ if (op == BC_FORI) {
+ | csel PC, RC, PC, gt
+ } else if (op == BC_JFORI) {
+ | mov PC, RC
+ | ldrh RCw, [RC, #-4+OFS_RD]
+ } else if (op == BC_IFORL) {
+ | csel PC, RC, PC, le
+ }
+ if (vk) {
+ | str TMP0, FOR_IDX
+ | str TMP0, FOR_EXT
+ } else {
+ | str CARG1, FOR_EXT
+ }
+ if (op == BC_JFORI || op == BC_JFORL) {
+ | ble =>BC_JLOOP
+ }
+ |2:
+ | ins_next
+ |
+ |4: // Invert check for negative step.
+ | cmp CARG2w, CARG1w
+ | b <1
+ |
+ |5: // FP loop.
+ | ldp d0, d1, FOR_IDX
+ | blo ->vmeta_for
+ if (!vk) {
+ | checknum CARG2, ->vmeta_for
+ | checknum CARG3, ->vmeta_for
+ | str d0, FOR_EXT
+ } else {
+ | ldr d2, FOR_STEP
+ | fadd d0, d0, d2
+ }
+ | tbnz CARG3, #63, >7
+ | fcmp d0, d1
+ |6:
+ if (vk) {
+ | str d0, FOR_IDX
+ | str d0, FOR_EXT
+ }
+ if (op == BC_FORI) {
+ | csel PC, RC, PC, hi
+ } else if (op == BC_JFORI) {
+ | ldrh RCw, [RC, #-4+OFS_RD]
+ | bls =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | csel PC, RC, PC, ls
+ } else {
+ | bls =>BC_JLOOP
+ }
+ | b <2
+ |
+ |7: // Invert check for negative step.
+ | fcmp d1, d0
+ | b <6
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base, RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | add TMP1, BASE, RA, lsl #3
+ | cmp_nil CARG1
+ | beq >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | str CARG1, [TMP1, #-8]
+ | b =>BC_JLOOP
+ } else {
+ | add TMP0, PC, RC, lsl #2 // Otherwise save control var + branch.
+ | sub PC, TMP0, #0x20000
+ | str CARG1, [TMP1, #-8]
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base, RC = target (loop extent)
+ |  // Note: RA/RC is only used by the trace recorder to determine scope/extent.
+ |  // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base, RC = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base (ignored), RC = traceno
+ | ldr CARG1, [GL, #GL_J(trace)]
+ | st_vmstate wzr // Traces on ARM64 don't store the trace #, so use 0.
+ | ldr TRACE:RC, [CARG1, RC, lsl #3]
+ |.if PAUTH
+ | ldr RA, TRACE:RC->mcauth
+ |.else
+ | ldr RA, TRACE:RC->mcode
+ |.endif
+ | str BASE, GL->jit_base
+ | str L, GL->tmpbuf.L
+ | sub sp, sp, #16 // See SPS_FIXED. Avoids sp adjust in every root trace.
+ |.if PAUTH
+ | braa RA, RC
+ |.else
+ | br RA
+ |.endif
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base (only used by trace recorder), RC = target
+ | add RC, PC, RC, lsl #2
+ | sub PC, RC, #0x20000
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)]
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | bhi ->vm_growstack_l
+ |2:
+ | cmp NARGS8:RC, TMP1, lsl #3 // Check for missing parameters.
+ | blo >3
+ if (op == BC_JFUNCF) {
+ | decode_RD RC, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | str TISNIL, [BASE, NARGS8:RC]
+ | add NARGS8:RC, NARGS8:RC, #8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | movn TMP0, #~LJ_TFUNC
+ | add TMP2, BASE, RC
+ | add LFUNC:CARG3, CARG3, TMP0, lsl #47
+ | add RA, RA, RC
+ | sub CARG1, CARG1, #8
+ | add TMP0, RC, #16+FRAME_VARG
+ | str LFUNC:CARG3, [TMP2], #8 // Store (tagged) copy of LFUNC.
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | str TMP0, [TMP2], #8 // Store delta + FRAME_VARG.
+ | bhs ->vm_growstack_l
+ | sub RC, TMP2, #16
+ | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)]
+ | mov RA, BASE
+ | mov BASE, TMP2
+ | cbz TMP1, >2
+ |1:
+ | cmp RA, RC // Less args than parameters?
+ | bhs >3
+ | ldr TMP0, [RA]
+ | sub TMP1, TMP1, #1
+ | str TISNIL, [RA], #8 // Clear old fixarg slot (help the GC).
+ | str TMP0, [TMP2], #8
+ | cbnz TMP1, <1
+ |2:
+ | ins_next
+ |
+ |3:
+ | sub TMP1, TMP1, #1
+ | str TISNIL, [TMP2], #8
+ | cbz TMP1, <2
+ | b <3
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ldr CARG4, CFUNC:CARG3->f
+ } else {
+ | ldr CARG4, GL->wrapf
+ }
+ | add CARG2, RA, NARGS8:RC
+ | ldr CARG1, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | cmp CARG2, CARG1
+ | stp BASE, RC, L->base
+ if (op == BC_FUNCCW) {
+ | ldr CARG2, CFUNC:CARG3->f
+ }
+ | mv_vmstate TMP0w, C
+ | mov CARG1, L
+ | bhi ->vm_growstack_c // Need to grow stack.
+ | st_vmstate TMP0w
+ | blr_auth CARG4 // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | ldp BASE, TMP1, L->base
+ | str L, GL->cur_L
+ | sbfiz RC, CRET1, #3, #32
+ | st_vmstate ST_INTERP
+ | ldr PC, [BASE, FRAME_PC]
+ | sub RA, TMP1, RC // RA = L->top - nresults*8
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n", /* offset fp */
+ fcofs);
+ for (i = 19; i <= 28; i++) /* offset x19-x28 */
+ fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, i+(3-19));
+ for (i = 8; i <= 15; i++) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
+ 64+i, i+(3+(28-19+1)-8));
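+    /* Note: 0x80+i encodes DW_CFA_offset for GPR i. The FPRs d8-d15 have
+    ** DWARF register numbers 64+i, which don't fit the 6-bit DW_CFA_offset
+    ** field, so DW_CFA_offset_extended (opcode 5) is used instead. Offsets
+    ** are factored (8-byte units, data alignment -8) relative to the CFA.
+    */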
+ fprintf(ctx->fp,
+ "\t.align 3\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n" /* offset fp */
+ "\t.byte 0x93\n\t.uleb128 3\n" /* offset x19 */
+ "\t.byte 0x94\n\t.uleb128 4\n" /* offset x20 */
+ "\t.align 3\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n", /* offset fp */
+ fcofs);
+ for (i = 19; i <= 28; i++) /* offset x19-x28 */
+ fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, i+(3-19));
+ for (i = 8; i <= 15; i++) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
+ 64+i, i+(3+(28-19+1)-8));
+ fprintf(ctx->fp,
+ "\t.align 3\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n" /* offset fp */
+ "\t.byte 0x93\n\t.uleb128 3\n" /* offset x19 */
+ "\t.byte 0x94\n\t.uleb128 4\n" /* offset x20 */
+ "\t.align 3\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+#if !LJ_NO_UNWIND
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int j;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+ "\t.long _lj_err_unwind_dwarf@GOT-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ "LECIEX:\n\n");
+ for (j = 0; j < ctx->nsym; j++) {
+ const char *name = ctx->sym[j].name;
+ int32_t size = ctx->sym[j+1].ofs - ctx->sym[j].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n", /* offset fp */
+ j, j, j, j, j, j, j, name, size);
+ for (i = 19; i <= 28; i++) /* offset x19-x28 */
+ fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, i+(3-19));
+ for (i = 8; i <= 15; i++) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
+ 64+i, i+(3+(28-19+1)-8));
+ fprintf(ctx->fp,
+ "\t.align 3\n"
+ "LEFDE%d:\n\n", j);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n" /* offset fp */
+ "\t.byte 0x93\n\t.uleb128 3\n" /* offset x19 */
+ "\t.byte 0x94\n\t.uleb128 4\n" /* offset x20 */
+ "\t.align 3\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+ fprintf(ctx->fp, ".subsections_via_symbols\n");
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
|. addiu NARGS8:RC, NARGS8:RC, -8
|
|.ffunc xpcall
+ | lw TMP1, L->maxstack
+ | addu TMP2, BASE, NARGS8:RC
| sltiu AT, NARGS8:RC, 16
| lw CARG4, 8+HI(BASE)
+ | sltu TMP1, TMP1, TMP2
+ | or AT, AT, TMP1
| bnez AT, ->fff_fallback
- |. ldc1 FARG2, 8(BASE)
- | ldc1 FARG1, 0(BASE)
+ |. lw CARG3, 8+LO(BASE)
+ | lw CARG1, LO(BASE)
+ | lw CARG2, HI(BASE)
| lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
| li AT, LJ_TFUNC
| move TMP2, BASE
--- /dev/null
+|// Low-level VM code for MIPS64 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
+|//
+|// Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
+|// Sponsored by Cisco Systems, Inc.
+|
+|.arch mips64
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
+|
+|.macro .FPU, a, b
+|.if FPU
+| a, b
+|.endif
+|.endmacro
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r16 // Base of current Lua stack frame.
+|.define KBASE, r17 // Constants of current Lua function.
+|.define PC, r18 // Next PC.
+|.define DISPATCH, r19 // Opcode dispatch table.
+|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
+|
+|.define JGL, r30 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNIL, r30
+|.define TISNUM, r22
+|.if FPU
+|.define TOBIT, f30 // 2^52 + 2^51.
+|.endif
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r23 // Callee-save.
+|.define RB, r8
+|.define RC, r9
+|.define RD, r10
+|.define INS, r11
+|
+|.define AT, r1 // Assembler temporary.
+|.define TMP0, r12
+|.define TMP1, r13
+|.define TMP2, r14
+|.define TMP3, r15
+|
+|// MIPS n64 calling convention.
+|.define CFUNCADDR, r25
+|.define CARG1, r4
+|.define CARG2, r5
+|.define CARG3, r6
+|.define CARG4, r7
+|.define CARG5, r8
+|.define CARG6, r9
+|.define CARG7, r10
+|.define CARG8, r11
+|
+|.define CRET1, r2
+|.define CRET2, r3
+|
+|.if FPU
+|.define FARG1, f12
+|.define FARG2, f13
+|.define FARG3, f14
+|.define FARG4, f15
+|.define FARG5, f16
+|.define FARG6, f17
+|.define FARG7, f18
+|.define FARG8, f19
+|
+|.define FRET1, f0
+|.define FRET2, f2
+|
+|.define FTMP0, f20
+|.define FTMP1, f21
+|.define FTMP2, f22
+|.endif
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.if FPU // MIPS64 hard-float.
+|
+|.define CFRAME_SPACE, 192 // Delta for sp.
+|
+|//----- 16 byte aligned, <-- sp entering interpreter
+|.define SAVE_ERRF, 188(sp) // 32 bit values.
+|.define SAVE_NRES, 184(sp)
+|.define SAVE_CFRAME, 176(sp) // 64 bit values.
+|.define SAVE_L, 168(sp)
+|.define SAVE_PC, 160(sp)
+|//----- 16 byte aligned
+|.define SAVE_GPR_, 80 // .. 80+10*8: 64 bit GPR saves.
+|.define SAVE_FPR_, 16 // .. 16+8*8: 64 bit FPR saves.
+|
+|.else // MIPS64 soft-float
+|
+|.define CFRAME_SPACE, 128 // Delta for sp.
+|
+|//----- 16 byte aligned, <-- sp entering interpreter
+|.define SAVE_ERRF, 124(sp) // 32 bit values.
+|.define SAVE_NRES, 120(sp)
+|.define SAVE_CFRAME, 112(sp) // 64 bit values.
+|.define SAVE_L, 104(sp)
+|.define SAVE_PC, 96(sp)
+|//----- 16 byte aligned
+|.define SAVE_GPR_, 16 // .. 16+10*8: 64 bit GPR saves.
+|
+|.endif
+|
+|.define TMPX, 8(sp) // Unused by interpreter, temp for JIT code.
+|.define TMPD, 0(sp)
+|//----- 16 byte aligned
+|
+|.define TMPD_OFS, 0
+|
+|.define SAVE_MULTRES, TMPD
+|
+|//-----------------------------------------------------------------------
+|
+|.macro saveregs
+| daddiu sp, sp, -CFRAME_SPACE
+| sd ra, SAVE_GPR_+9*8(sp)
+| sd r30, SAVE_GPR_+8*8(sp)
+| .FPU sdc1 f31, SAVE_FPR_+7*8(sp)
+| sd r23, SAVE_GPR_+7*8(sp)
+| .FPU sdc1 f30, SAVE_FPR_+6*8(sp)
+| sd r22, SAVE_GPR_+6*8(sp)
+| .FPU sdc1 f29, SAVE_FPR_+5*8(sp)
+| sd r21, SAVE_GPR_+5*8(sp)
+| .FPU sdc1 f28, SAVE_FPR_+4*8(sp)
+| sd r20, SAVE_GPR_+4*8(sp)
+| .FPU sdc1 f27, SAVE_FPR_+3*8(sp)
+| sd r19, SAVE_GPR_+3*8(sp)
+| .FPU sdc1 f26, SAVE_FPR_+2*8(sp)
+| sd r18, SAVE_GPR_+2*8(sp)
+| .FPU sdc1 f25, SAVE_FPR_+1*8(sp)
+| sd r17, SAVE_GPR_+1*8(sp)
+| .FPU sdc1 f24, SAVE_FPR_+0*8(sp)
+| sd r16, SAVE_GPR_+0*8(sp)
+|.endmacro
+|
+|.macro restoreregs_ret
+| ld ra, SAVE_GPR_+9*8(sp)
+| ld r30, SAVE_GPR_+8*8(sp)
+| ld r23, SAVE_GPR_+7*8(sp)
+| .FPU ldc1 f31, SAVE_FPR_+7*8(sp)
+| ld r22, SAVE_GPR_+6*8(sp)
+| .FPU ldc1 f30, SAVE_FPR_+6*8(sp)
+| ld r21, SAVE_GPR_+5*8(sp)
+| .FPU ldc1 f29, SAVE_FPR_+5*8(sp)
+| ld r20, SAVE_GPR_+4*8(sp)
+| .FPU ldc1 f28, SAVE_FPR_+4*8(sp)
+| ld r19, SAVE_GPR_+3*8(sp)
+| .FPU ldc1 f27, SAVE_FPR_+3*8(sp)
+| ld r18, SAVE_GPR_+2*8(sp)
+| .FPU ldc1 f26, SAVE_FPR_+2*8(sp)
+| ld r17, SAVE_GPR_+1*8(sp)
+| .FPU ldc1 f25, SAVE_FPR_+1*8(sp)
+| ld r16, SAVE_GPR_+0*8(sp)
+| .FPU ldc1 f24, SAVE_FPR_+0*8(sp)
+| jr ra
+| daddiu sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; .long 0xec1cf0f0; .endmacro
+|
+|// Macros to mark delay slots.
+|.macro ., a; a; .endmacro
+|.macro ., a,b; a,b; .endmacro
+|.macro ., a,b,c; a,b,c; .endmacro
+|.macro ., a,b,c,d; a,b,c,d; .endmacro
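+|// The "." macro emits its arguments unchanged; "|. insn" merely documents
+|// that insn executes in the delay slot of the preceding branch.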
+|
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -16
+|
+|//-----------------------------------------------------------------------
+|
+|// Endian-specific defines.
+|.if ENDIAN_LE
+|.define HI, 4
+|.define LO, 0
+|.define OFS_RD, 2
+|.define OFS_RA, 1
+|.define OFS_OP, 0
+|.else
+|.define HI, 0
+|.define LO, 4
+|.define OFS_RD, 0
+|.define OFS_RA, 2
+|.define OFS_OP, 3
+|.endif
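+|// HI/LO select the high/low 32 bit word of a 64 bit slot; OFS_RD/RA/OP are
+|// the byte offsets of the operand/opcode fields within a bytecode instruction.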
+|
+|// Instruction decode.
+|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP8a, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RC8a, dst, ins; srl dst, ins, 13; .endmacro
+|.macro decode_RC8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
+|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
+|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
+|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
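+|// Bytecode layout: OP in bits 0-7, A in bits 8-15, and either D in bits
+|// 16-31 or C/B in bits 16-23/24-31. The *8 variants scale an operand to a
+|// byte offset into the 8-byte stack or constant slots.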
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lw INS, 0(PC)
+| daddiu PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP8a TMP1, INS
+| decode_OP8b TMP1
+| daddu TMP0, DISPATCH, TMP1
+| decode_RD8a RD, INS
+| ld AT, 0(TMP0)
+| decode_RA8a RA, INS
+| decode_RD8b RD
+| jr AT
+| decode_RA8b RA
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ld PC, LFUNC:RB->pc
+| lw INS, 0(PC)
+| daddiu PC, PC, 4
+| decode_OP8a TMP1, INS
+| decode_RA8a RA, INS
+| decode_OP8b TMP1
+| decode_RA8b RA
+| daddu TMP0, DISPATCH, TMP1
+| ld TMP0, 0(TMP0)
+| jr TMP0
+| daddu RA, RA, BASE
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| sd PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|.macro branch_RD
+| srl TMP0, RD, 1
+| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
+| addu TMP0, TMP0, AT
+| daddu PC, PC, TMP0
+|.endmacro
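+|// RD holds the branch operand D*8. This advances PC by (D - BCBIAS_J)*4,
+|// i.e. jump targets are stored with a bias of BCBIAS_J.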
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
+#define DISPATCH_GOT(name) (GG_DISP2GOT + sizeof(void*)*LJ_GOT_##name)
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro load_got, func
+| ld CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
+|.endmacro
+|// Much faster. Sadly, there's no easy way to force the required code layout.
+|// .macro call_intern, func; bal extern func; .endmacro
+|.macro call_intern, func; jalr CFUNCADDR; .endmacro
+|.macro call_extern; jalr CFUNCADDR; .endmacro
+|.macro jmp_extern; jr CFUNCADDR; .endmacro
+|
+|.macro hotcheck, delta, target
+| dsrl TMP1, PC, 1
+| andi TMP1, TMP1, 126
+| daddu TMP1, TMP1, DISPATCH
+| lhu TMP2, GG_DISP2HOT(TMP1)
+| addiu TMP2, TMP2, -delta
+| bltz TMP2, target
+|. sh TMP2, GG_DISP2HOT(TMP1)
+|.endmacro
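+|// Indexes one of 64 16 bit hotcounts via ((PC>>2) & 63), decrements it by
+|// delta and branches to the target once the counter underflows.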
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp, target
+| ld tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
+| sd tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| sb mark, tab->marked
+| b target
+|. sd tmp, tab->gclist
+|.endmacro
+|
+|// Clear type tag. Isolate lowest 14+32+1=47 bits of reg.
+|.macro cleartp, reg; dextm reg, reg, 0, 14; .endmacro
+|.macro cleartp, dst, reg; dextm dst, reg, 0, 14; .endmacro
+|
+|// Set type tag: Merge 17 type bits into bits [15+32=47, 31+32+1=64) of dst.
+|.macro settp, dst, tp; dinsu dst, tp, 15, 31; .endmacro
+|
+|// Extract (negative) type tag.
+|.macro gettp, dst, src; dsra dst, src, 47; .endmacro
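+|// Example: nil is encoded as all ones (TISNIL = -1), so gettp yields LJ_TNIL.
+|// GC objects keep their (negative) itype in the upper 17 bits, so the checktp
+|// macros below get zero after adding the negated type tag.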
+|
+|// Macros to check the TValue type and extract the GCobj. Branch on failure.
+|.macro checktp, reg, tp, target
+| gettp AT, reg
+| daddiu AT, AT, tp
+| bnez AT, target
+|. cleartp reg
+|.endmacro
+|.macro checktp, dst, reg, tp, target
+| gettp AT, reg
+| daddiu AT, AT, tp
+| bnez AT, target
+|. cleartp dst, reg
+|.endmacro
+|.macro checkstr, reg, target; checktp reg, -LJ_TSTR, target; .endmacro
+|.macro checktab, reg, target; checktp reg, -LJ_TTAB, target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, -LJ_TFUNC, target; .endmacro
+|.macro checkint, reg, target // Caveat: has delay slot!
+| gettp AT, reg
+| bne AT, TISNUM, target
+|.endmacro
+|.macro checknum, reg, target // Caveat: has delay slot!
+| gettp AT, reg
+| sltiu AT, AT, LJ_TISNUM
+| beqz AT, target
+|.endmacro
+|
+|.macro mov_false, reg
+| lu reg, 0x8000
+| dsll reg, reg, 32
+| not reg, reg
+|.endmacro
+|.macro mov_true, reg
+| li reg, 0x0001
+| dsll reg, reg, 48
+| not reg, reg
+|.endmacro
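+|// mov_false builds ~(1<<47) and mov_true builds ~(1<<48), the canonical
+|// false/true TValues (gettp yields LJ_TFALSE resp. LJ_TTRUE).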
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi AT, PC, FRAME_P
+ | beqz AT, ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ |. mov_true TMP1
+ | ld PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | move BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | sd TMP1, -8(RA) // Prepend true to results.
+ | daddiu RA, RA, -8
+ |
+ |->vm_returnc:
+ | addiu RD, RD, 8 // RD = (nresults+1)*8.
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz RD, ->vm_unwind_c_eh
+ |. li CRET1, LUA_YIELD
+ | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
+ |. move MULTRES, RD
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | li TMP2, -8
+ | xori AT, TMP0, FRAME_C
+ | and TMP2, PC, TMP2
+ | bnez AT, ->vm_returnp
+ | dsubu TMP2, BASE, TMP2 // TMP2 = previous base.
+ |
+ | addiu TMP1, RD, -8
+ | sd TMP2, L->base
+ | li_vmstate C
+ | lw TMP2, SAVE_NRES
+ | daddiu BASE, BASE, -16
+ | st_vmstate
+ | beqz TMP1, >2
+ |. sll TMP2, TMP2, 3
+ |1:
+ | addiu TMP1, TMP1, -8
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | sd CRET1, 0(BASE)
+ | bnez TMP1, <1
+ |. daddiu BASE, BASE, 8
+ |
+ |2:
+ | bne TMP2, RD, >6
+ |3:
+ |. sd BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ld TMP0, SAVE_CFRAME // Restore previous C frame.
+ | move CRET1, r0 // Ok return status for vm_pcall.
+ | sd TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | ld TMP1, L->maxstack
+ | slt AT, TMP2, RD
+ | bnez AT, >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ |. slt AT, BASE, TMP1
+ | beqz AT, >8
+ |. nop
+ | sd TISNIL, 0(BASE)
+ | addiu RD, RD, 8
+ | b <2
+ |. daddiu BASE, BASE, 8
+ |
+ |7: // Less results wanted.
+ | subu TMP0, RD, TMP2
+ | dsubu TMP0, BASE, TMP0 // Either keep top or shrink it.
+ |.if MIPSR6
+ | selnez TMP0, TMP0, TMP2 // LUA_MULTRET+1 case?
+ | seleqz BASE, BASE, TMP2
+ | b <3
+ |. or BASE, BASE, TMP0
+ |.else
+ | b <3
+ |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
+ |.endif
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | load_got lj_state_growstack
+ | move MULTRES, RD
+ | srl CARG2, TMP2, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw TMP2, SAVE_NRES
+ | ld BASE, L->top // Need the (realloced) L->top in BASE.
+ | move RD, MULTRES
+ | b <2
+ |. sll TMP2, TMP2, 3
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | move sp, CARG1
+ | move CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | ld L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | ld GL:TMP1, L->glref
+ | b ->vm_leave_unw
+ |. sw TMP0, GL:TMP1->vmstate
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | li AT, -4
+ | and sp, CARG1, AT
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | ld L, SAVE_L
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM
+ | ld BASE, L->base
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | .FPU mtc1 TMP3, TOBIT
+ | mov_false TMP1
+ | li_vmstate INTERP
+ | ld PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | daddiu RA, BASE, -8 // Results start at BASE-8.
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sd TMP1, 0(RA) // Prepend false to error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |. li RD, 16 // 2 results: false + error message.
+ |
+ |->vm_unwind_stub: // Jump to exit stub from unwinder.
+ | jr CARG1
+ |. move ra, CARG2
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | b >2
+ |. li CARG2, LUA_MINSTACK
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | daddu RC, BASE, RC
+ | dsubu RA, RA, BASE
+ | sd BASE, L->base
+ | daddiu PC, PC, 4 // Must point after first instruction.
+ | sd RC, L->top
+ | srl CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | load_got lj_state_growstack
+ | sd PC, SAVE_PC
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | ld BASE, L->base
+ | ld RC, L->top
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | dsubu RC, RC, BASE
+ | cleartp LFUNC:RB
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | move L, CARG1
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | move BASE, CARG2
+ | lbu TMP1, L->status
+ | sd L, SAVE_L
+ | li PC, FRAME_CP
+ | daddiu TMP0, sp, CFRAME_RESUME
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw r0, SAVE_NRES
+ | sw r0, SAVE_ERRF
+ | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sd r0, SAVE_CFRAME
+ | beqz TMP1, >3
+ |. sd TMP0, L->cframe
+ |
+ | // Resume after yield (like a return).
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | move RA, BASE
+ | ld BASE, L->base
+ | ld TMP1, L->top
+ | ld PC, FRAME_PC(BASE)
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | dsubu RD, TMP1, BASE
+ | .FPU mtc1 TMP3, TOBIT
+ | sb r0, L->status
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | daddiu RD, RD, 8
+ | st_vmstate
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | li TISNIL, LJ_TNIL
+ | beqz TMP0, ->BC_RET_Z
+ |. li TISNUM, LJ_TISNUM
+ | b ->vm_return
+ |. nop
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | sw CARG4, SAVE_ERRF
+ | b >1
+ |. li PC, FRAME_CP
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ld TMP1, L:CARG1->cframe
+ | move L, CARG1
+ | sw CARG3, SAVE_NRES
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sd CARG1, SAVE_L
+ | move BASE, CARG2
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sd TMP1, SAVE_CFRAME
+ | sd sp, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | ld TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | ld TMP1, L->top
+ | .FPU mtc1 TMP3, TOBIT
+ | daddu PC, PC, BASE
+ | dsubu NARGS8:RC, TMP1, BASE
+ | li TISNUM, LJ_TISNUM
+ | dsubu PC, PC, TMP2 // PC = frame delta + frame type
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | checkfunc LFUNC:RB, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | move L, CARG1
+ | ld TMP0, L:CARG1->stack
+ | sd CARG1, SAVE_L
+ | ld TMP1, L->top
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | dsubu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | ld TMP1, L->cframe
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | sw r0, SAVE_ERRF // No error function.
+ | sd TMP1, SAVE_CFRAME
+ | sd sp, L->cframe // Add our C frame to cframe chain.
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |. move CFUNCADDR, CARG4
+ | move BASE, CRET1
+ | bnez CRET1, <3 // Else continue with the call.
+ |. li PC, FRAME_CP
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |. nop
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | ld TMP0, -32(BASE) // Continuation.
+ | move RB, BASE
+ | move BASE, TMP2 // Restore caller BASE.
+ | ld LFUNC:TMP1, FRAME_FUNC(TMP2)
+ |.if FFI
+ | sltiu AT, TMP0, 2
+ |.endif
+ | ld PC, -24(RB) // Restore PC from [cont|PC].
+ | cleartp LFUNC:TMP1
+ | daddu TMP2, RA, RD
+ |.if FFI
+ | bnez AT, >1
+ |.endif
+ |. sd TISNIL, -8(TMP2) // Ensure one valid arg.
+ | ld TMP1, LFUNC:TMP1->pc
+ | // BASE = base, RA = resultptr, RB = meta base
+ | jr TMP0 // Jump to continuation.
+ |. ld KBASE, PC2PROTO(k)(TMP1)
+ |
+ |.if FFI
+ |1:
+ | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ |. daddiu TMP1, RB, -32
+ | b ->vm_call_tail
+ |. dsubu RC, TMP1, BASE
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | daddiu CARG2, RB, -32
+ | ld CRET1, 0(RA)
+ | decode_RB8a MULTRES, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b MULTRES
+ | decode_RA8b RA
+ | daddu TMP1, BASE, MULTRES
+ | sd BASE, L->base
+ | dsubu CARG3, CARG2, TMP1
+ | bne TMP1, CARG2, ->BC_CAT_Z
+ |. sd CRET1, 0(CARG2)
+ | daddu RA, BASE, RA
+ | b ->cont_nop
+ |. sd CRET1, 0(RA)
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | settp STR:RC, TMP0
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tgets:
+ | daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | li TMP1, LJ_TSTR
+ | settp TAB:RB, TMP0
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sd TAB:RB, 0(CARG2)
+ | settp STR:RC, TMP1
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | settp TMP0, TISNUM
+ | sd TMP0, 0(CARG3)
+ |
+ |->vmeta_tgetv:
+ |1:
+ | load_got lj_meta_tget
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. daddiu TMP1, BASE, -FRAME_CONT
+ | ld CARG1, 0(CRET1)
+ | ins_next1
+ | sd CARG1, 0(RA)
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | ld BASE, L->top
+ | sd PC, -24(BASE) // [cont|PC]
+ | dsubu PC, BASE, TMP1
+ | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | cleartp LFUNC:RB
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 16 // 2 args for func(t, k).
+ |
+ |->vmeta_tgetr:
+ | load_got lj_tab_getinth
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. nop
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->BC_TGETR_Z
+ |. move CARG2, TISNIL
+ | b ->BC_TGETR_Z
+ |. ld CARG2, 0(CRET1)
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | settp STR:RC, TMP0
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tsets:
+ | daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | li TMP1, LJ_TSTR
+ | settp TAB:RB, TMP0
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sd TAB:RB, 0(CARG2)
+ | settp STR:RC, TMP1
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | settp TMP0, TISNUM
+ | sd TMP0, 0(CARG3)
+ |
+ |->vmeta_tsetv:
+ |1:
+ | load_got lj_meta_tset
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. ld CARG1, 0(RA)
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | sd CARG1, 0(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | daddiu TMP1, BASE, -FRAME_CONT
+ | ld BASE, L->top
+ | sd PC, -24(BASE) // [cont|PC]
+ | dsubu PC, BASE, TMP1
+ | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | cleartp LFUNC:RB
+ | sd CARG1, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ |
+ |->vmeta_tsetr:
+ | load_got lj_tab_setinth
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | b ->BC_TSETR_Z
+ |. nop
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | // RA/RD point to o1/o2.
+ | move CARG2, RA
+ | move CARG3, RD
+ | load_got lj_meta_comp
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | decode_OP1 CARG4, INS
+ | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | sltiu AT, CRET1, 2
+ | beqz AT, ->vmeta_binop
+ | negu TMP2, CRET1
+ |4:
+ | lhu RD, OFS_RD(PC)
+ | daddiu PC, PC, 4
+ | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sll RD, RD, 2
+ | addu RD, RD, TMP1
+ | and RD, RD, TMP2
+ | daddu PC, PC, RD
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lbu TMP1, -4+OFS_RA(PC)
+ | ld CRET1, 0(RA)
+ | sll TMP1, TMP1, 3
+ | daddu TMP1, BASE, TMP1
+ | b ->cont_nop
+ |. sd CRET1, 0(TMP1)
+ |
+ |->cont_condt: // RA = resultptr
+ | ld TMP0, 0(RA)
+ | gettp TMP0, TMP0
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. negu TMP2, AT // Branch if result is true.
+ |
+ |->cont_condf: // RA = resultptr
+ | ld TMP0, 0(RA)
+ | gettp TMP0, TMP0
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. addiu TMP2, AT, -1 // Branch if result is false.
+ |
+ |->vmeta_equal:
+ | // CARG1/CARG2 point to o1/o2. TMP0 is set to 0/1.
+ | load_got lj_meta_equal
+ | cleartp LFUNC:CARG3, CARG2
+ | cleartp LFUNC:CARG2, CARG1
+ | move CARG4, TMP0
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | load_got lj_meta_equal_cd
+ | move CARG2, INS
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |.endif
+ |
+ |->vmeta_istype:
+ | load_got lj_meta_istype
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | srl CARG2, RA, 3
+ | srl CARG3, RD, 3
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ |. move CARG1, L
+ | b ->cont_nop
+ |. nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_unm:
+ | move RC, RB
+ |
+ |->vmeta_arith:
+ | load_got lj_meta_arith
+ | sd BASE, L->base
+ | move CARG2, RA
+ | sd PC, SAVE_PC
+ | move CARG3, RB
+ | move CARG4, RC
+ | decode_OP1 CARG5, INS // CARG5 == RB.
+ | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | beqz CRET1, ->cont_nop
+ |. nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | dsubu TMP1, CRET1, BASE
+ | sd PC, -24(CRET1) // [cont|PC]
+ | move TMP2, BASE
+ | daddiu PC, TMP1, FRAME_CONT
+ | move BASE, CRET1
+ | b ->vm_call_dispatch
+ |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ |
+ |->vmeta_len:
+ | // CARG2 already set by BC_LEN.
+#if LJ_52
+ | move MULTRES, CARG1
+#endif
+ | load_got lj_meta_len
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_len // (lua_State *L, TValue *o)
+ |. move CARG1, L
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
+ |. nop
+ | b ->BC_LEN_Z
+ |. move CARG1, MULTRES
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+ |. nop
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sd TMP2, L->base // This is the callers base!
+ | daddiu CARG2, BASE, -16
+ | sd PC, SAVE_PC
+ | daddu CARG3, BASE, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | daddiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | cleartp LFUNC:RB
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sd BASE, L->base
+ | daddiu CARG2, RA, -16
+ | sd PC, SAVE_PC
+ | daddu CARG3, RA, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | ld RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | ld TMP1, FRAME_PC(BASE)
+ | daddiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | b ->BC_CALLT_Z
+ |. cleartp LFUNC:CARG3, RB
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | load_got lj_meta_for
+ | sd BASE, L->base
+ | move CARG2, RA
+ | sd PC, SAVE_PC
+ | move MULTRES, INS
+ | call_intern lj_meta_for // (lua_State *L, TValue *base)
+ |. move CARG1, L
+ |.if JIT
+ | decode_OP1 TMP0, MULTRES
+ | li AT, BC_JFORI
+ |.endif
+ | decode_RA8a RA, MULTRES
+ | decode_RD8a RD, MULTRES
+ | decode_RA8b RA
+ |.if JIT
+ | beq TMP0, AT, =>BC_JFORI
+ |. decode_RD8b RD
+ | b =>BC_FORI
+ |. nop
+ |.else
+ | b =>BC_FORI
+ |. decode_RD8b RD
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ld CARG1, 0(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | ld CARG1, 0(BASE)
+ | bnez AT, ->fff_fallback
+ |. ld CARG2, 8(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_n, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | ld CARG1, 0(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ | // Either ldc1 or the 1st instruction of checknum is in the delay slot.
+ | .FPU ldc1 FARG1, 0(BASE)
+ | checknum CARG1, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | ld CARG1, 0(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | ld CARG2, 8(BASE)
+ | bnez AT, ->fff_fallback
+ |. gettp TMP0, CARG1
+ | gettp TMP1, CARG2
+ | sltiu TMP0, TMP0, LJ_TISNUM
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | .FPU ldc1 FARG1, 0(BASE)
+ | and TMP0, TMP0, TMP1
+ | .FPU ldc1 FARG2, 8(BASE)
+ | beqz TMP0, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
+ |// MIPSR6: no delay slot, but a forbidden slot.
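+ |// Branches-and-links to ->fff_gcstep when gc.total >= gc.threshold.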
+ |.macro ffgccheck
+ | ld TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | ld TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | dsubu AT, TMP0, TMP1
+ |.if MIPSR6
+ | bgezalc AT, ->fff_gcstep
+ |.else
+ | bgezal AT, ->fff_gcstep
+ |.endif
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |.ffunc_1 assert
+ | gettp AT, CARG1
+ | sltiu AT, AT, LJ_TISTRUECOND
+ | beqz AT, ->fff_fallback
+ |. daddiu RA, BASE, -16
+ | ld PC, FRAME_PC(BASE)
+ | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | daddu TMP2, RA, RD
+ | daddiu TMP1, BASE, 8
+ | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
+ |. sd CARG1, 0(RA)
+ |1:
+ | ld CRET1, 0(TMP1)
+ | sd CRET1, -16(TMP1)
+ | bne TMP1, TMP2, <1
+ |. daddiu TMP1, TMP1, 8
+ | b ->fff_res
+ |. nop
+ |
+ |.ffunc_1 type
+ | gettp TMP0, CARG1
+ | sltu TMP1, TISNUM, TMP0
+ | not TMP2, TMP0
+ | li TMP3, ~LJ_TISNUM
+ |.if MIPSR6
+ | selnez TMP2, TMP2, TMP1
+ | seleqz TMP3, TMP3, TMP1
+ | or TMP2, TMP2, TMP3
+ |.else
+ | movz TMP2, TMP3, TMP1
+ |.endif
+ | dsll TMP2, TMP2, 3
+ | daddu TMP2, CFUNC:RB, TMP2
+ | b ->fff_restv
+ |. ld CARG1, CFUNC:TMP2->upvalue
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | gettp TMP2, CARG1
+ | daddiu TMP0, TMP2, -LJ_TTAB
+ | daddiu TMP1, TMP2, -LJ_TUDATA
+ |.if MIPSR6
+ | selnez TMP0, TMP1, TMP0
+ |.else
+ | movn TMP0, TMP1, TMP0
+ |.endif
+ | bnez TMP0, >6
+ |. cleartp TAB:CARG1
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ld TAB:RB, TAB:CARG1->metatable
+ |2:
+ | ld STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beqz TAB:RB, ->fff_restv
+ |. li CARG1, LJ_TNIL
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | ld NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | dsll TMP0, TMP1, 5
+ | dsll TMP1, TMP1, 3
+ | dsubu TMP1, TMP0, TMP1
+ | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | li CARG4, LJ_TSTR
+ | settp STR:RC, CARG4 // Tagged key to look for.
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ld TMP0, NODE:TMP2->key
+ | ld CARG1, NODE:TMP2->val
+ | ld NODE:TMP2, NODE:TMP2->next
+ | beq RC, TMP0, >5
+ |. li AT, LJ_TTAB
+ | bnez NODE:TMP2, <3
+ |. nop
+ |4:
+ | move CARG1, RB
+ | b ->fff_restv // Not found, keep default result.
+ |. settp CARG1, AT
+ |5:
+ | bne CARG1, TISNIL, ->fff_restv
+ |. nop
+ | b <4 // Ditto for nil value.
+ |. nop
+ |
+ |6:
+ | sltiu AT, TMP2, LJ_TISNUM
+ |.if MIPSR6
+ | selnez TMP0, TISNUM, AT
+ | seleqz AT, TMP2, AT
+ | or TMP2, TMP0, AT
+ |.else
+ | movn TMP2, TISNUM, AT
+ |.endif
+ | dsll TMP2, TMP2, 3
+ | dsubu TMP0, DISPATCH, TMP2
+ | b <2
+ |. ld TAB:RB, DISPATCH_GL(gcroot[GCROOT_BASEMT])-8(TMP0)
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp TMP1, CARG1, -LJ_TTAB, ->fff_fallback
+ | gettp TMP3, CARG2
+ | ld TAB:TMP0, TAB:TMP1->metatable
+ | lbu TMP2, TAB:TMP1->marked
+ | daddiu AT, TMP3, -LJ_TTAB
+ | cleartp TAB:CARG2
+ | or AT, AT, TAB:TMP0
+ | bnez AT, ->fff_fallback
+ |. andi AT, TMP2, LJ_GC_BLACK // isblack(table)
+ | beqz AT, ->fff_restv
+ |. sd TAB:CARG2, TAB:TMP1->metatable
+ | barrierback TAB:TMP1, TMP2, TMP0, ->fff_restv
+ |
+ |.ffunc rawget
+ | ld CARG2, 0(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | load_got lj_tab_get
+ | gettp TMP0, CARG2
+ | cleartp CARG2
+ | daddiu TMP0, TMP0, -LJ_TTAB
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. daddiu CARG3, BASE, 8
+ | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ |. move CARG1, L
+ | b ->fff_restv
+ |. ld CARG1, 0(CRET1)
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | ld CARG1, 0(BASE)
+ | xori AT, NARGS8:RC, 8 // Exactly one number argument.
+ | gettp TMP1, CARG1
+ | sltu TMP0, TISNUM, TMP1
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. nop
+ | b ->fff_restv
+ |. nop
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | gettp TMP0, CARG1
+ | daddiu AT, TMP0, -LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beqz AT, ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ |. ld TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | sltu TMP0, TISNUM, TMP0
+ | or TMP0, TMP0, TMP1
+ | bnez TMP0, ->fff_fallback
+ |. sd BASE, L->base // Add frame since C call can throw.
+ |.if MIPSR6
+ | sd PC, SAVE_PC // Redundant (but a defined value).
+ | ffgccheck
+ |.else
+ | ffgccheck
+ |. sd PC, SAVE_PC // Redundant (but a defined value).
+ |.endif
+ | load_got lj_strfmt_number
+ | move CARG1, L
+ | call_intern lj_strfmt_number // (lua_State *L, cTValue *o)
+ |. move CARG2, BASE
+ | // Returns GCstr *.
+ | li AT, LJ_TSTR
+ | settp CRET1, AT
+ | b ->fff_restv
+ |. move CARG1, CRET1
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | checktp CARG1, -LJ_TTAB, ->fff_fallback
+ | daddu TMP2, BASE, NARGS8:RC
+ | sd TISNIL, 0(TMP2) // Set missing 2nd arg to nil.
+ | load_got lj_tab_next
+ | ld PC, FRAME_PC(BASE)
+ | daddiu CARG2, BASE, 8
+ | call_intern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ |. daddiu CARG3, BASE, -16
+ | // Returns 1=found, 0=end, -1=error.
+ | daddiu RA, BASE, -16
+ | bgtz CRET1, ->fff_res // Found key/value.
+ |. li RD, (2+1)*8
+ | beqz CRET1, ->fff_restv // End of traversal: return nil.
+ |. move CARG1, TISNIL
+ | ld CFUNC:RB, FRAME_FUNC(BASE)
+ | cleartp CFUNC:RB
+ | b ->fff_fallback // Invalid key.
+ |. li RC, 2*8
+ |
+ |.ffunc_1 pairs
+ | checktp TAB:TMP1, CARG1, -LJ_TTAB, ->fff_fallback
+ | ld PC, FRAME_PC(BASE)
+#if LJ_52
+ | ld TAB:TMP2, TAB:TMP1->metatable
+ | ld TMP0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ld TMP0, CFUNC:RB->upvalue[0]
+#endif
+ |. daddiu RA, BASE, -16
+ | sd TISNIL, 0(BASE)
+ | sd CARG1, -8(BASE)
+ | sd TMP0, 0(RA)
+ | b ->fff_res
+ |. li RD, (3+1)*8
+ |
+ |.ffunc_2 ipairs_aux
+ | checktab CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ |. lw TMP0, TAB:CARG1->asize
+ | ld TMP1, TAB:CARG1->array
+ | ld PC, FRAME_PC(BASE)
+ | sextw TMP2, CARG2
+ | addiu TMP2, TMP2, 1
+ | sltu AT, TMP2, TMP0
+ | daddiu RA, BASE, -16
+ | zextw TMP0, TMP2
+ | settp TMP0, TISNUM
+ | beqz AT, >2 // Not in array part?
+ |. sd TMP0, 0(RA)
+ | dsll TMP3, TMP2, 3
+ | daddu TMP3, TMP1, TMP3
+ | ld TMP1, 0(TMP3)
+ |1:
+ | beq TMP1, TISNIL, ->fff_res // End of iteration, return 0 results.
+ |. li RD, (0+1)*8
+ | sd TMP1, -8(BASE)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lw TMP0, TAB:CARG1->hmask
+ | load_got lj_tab_getinth
+ | beqz TMP0, ->fff_res
+ |. li RD, (0+1)*8
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. move CARG2, TMP2
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->fff_res
+ |. li RD, (0+1)*8
+ | b <1
+ |. ld TMP1, 0(CRET1)
+ |
+ |.ffunc_1 ipairs
+ | checktp TAB:TMP1, CARG1, -LJ_TTAB, ->fff_fallback
+ | ld PC, FRAME_PC(BASE)
+#if LJ_52
+ | ld TAB:TMP2, TAB:TMP1->metatable
+ | ld CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ld TMP0, CFUNC:RB->upvalue[0]
+#endif
+ | daddiu RA, BASE, -16
+ | dsll AT, TISNUM, 47
+ | sd CARG1, -8(BASE)
+ | sd AT, 0(BASE)
+ | sd CFUNC:TMP0, 0(RA)
+ | b ->fff_res
+ |. li RD, (3+1)*8
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | ld TMP1, L->maxstack
+ | daddu TMP2, BASE, NARGS8:RC
+ | sltu AT, TMP1, TMP2
+ | bnez AT, ->fff_fallback
+ |. lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | daddiu NARGS8:RC, NARGS8:RC, -8
+ | bltz NARGS8:RC, ->fff_fallback
+ |. move TMP2, BASE
+ | daddiu BASE, BASE, 16
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
+ | andi TMP3, TMP3, 1
+ | daddiu PC, TMP3, 16+FRAME_PCALL
+ | beqz NARGS8:RC, ->vm_call_dispatch
+ |1:
+ |. daddu TMP0, BASE, NARGS8:RC
+ |2:
+ | ld TMP1, -16(TMP0)
+ | sd TMP1, -8(TMP0)
+ | daddiu TMP0, TMP0, -8
+ | bne TMP0, BASE, <2
+ |. nop
+ | b ->vm_call_dispatch
+ |. nop
+ |
+ |.ffunc xpcall
+ | ld TMP1, L->maxstack
+ | daddu TMP2, BASE, NARGS8:RC
+ | sltu AT, TMP1, TMP2
+ | bnez AT, ->fff_fallback
+ |. ld CARG1, 0(BASE)
+ | daddiu NARGS8:TMP0, NARGS8:RC, -16
+ | ld CARG2, 8(BASE)
+ | bltz NARGS8:TMP0, ->fff_fallback
+ |. lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | gettp AT, CARG2
+ | daddiu AT, AT, -LJ_TFUNC
+ | bnez AT, ->fff_fallback // Traceback must be a function.
+ |. move TMP2, BASE
+ | move NARGS8:RC, NARGS8:TMP0
+ | daddiu BASE, BASE, 24
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
+ | sd CARG2, 0(TMP2) // Swap function and traceback.
+ | andi TMP3, TMP3, 1
+ | sd CARG1, 8(TMP2)
+ | beqz NARGS8:RC, ->vm_call_dispatch
+ |. daddiu PC, TMP3, 24+FRAME_PCALL
+ | b <1
+ |. nop
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG1, CARG1, -LJ_TTHREAD, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ld L:CARG1, CFUNC:RB->upvalue[0].gcr
+ | cleartp L:CARG1
+ |.endif
+ | lbu TMP0, L:CARG1->status
+ | ld TMP1, L:CARG1->cframe
+ | ld CARG2, L:CARG1->top
+ | ld TMP2, L:CARG1->base
+ | addiu AT, TMP0, -LUA_YIELD
+ | daddu CARG3, CARG2, TMP0
+ | daddiu TMP3, CARG2, 8
+ |.if MIPSR6
+ | seleqz CARG2, CARG2, AT
+ | selnez TMP3, TMP3, AT
+ | bgtz AT, ->fff_fallback // st > LUA_YIELD?
+ |. or CARG2, TMP3, CARG2
+ |.else
+ | bgtz AT, ->fff_fallback // st > LUA_YIELD?
+ |. movn CARG2, TMP3, AT
+ |.endif
+ | xor TMP2, TMP2, CARG3
+ | bnez TMP1, ->fff_fallback // cframe != 0?
+ |. or AT, TMP2, TMP0
+ | ld TMP0, L:CARG1->maxstack
+ | beqz AT, ->fff_fallback // base == top && st == 0?
+ |. ld PC, FRAME_PC(BASE)
+ | daddu TMP2, CARG2, NARGS8:RC
+ | sltu AT, TMP0, TMP2
+ | bnez AT, ->fff_fallback // Stack overflow?
+ |. sd PC, SAVE_PC
+ | sd BASE, L->base
+ |1:
+ |.if resume
+ | daddiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | daddiu NARGS8:RC, NARGS8:RC, -8
+ | daddiu TMP2, TMP2, -8
+ |.endif
+ | sd TMP2, L:CARG1->top
+ | daddu TMP1, BASE, NARGS8:RC
+ | move CARG3, CARG2
+ | sd BASE, L->top
+ |2: // Move args to coroutine.
+ | ld CRET1, 0(BASE)
+ | sltu AT, BASE, TMP1
+ | beqz AT, >3
+ |. daddiu BASE, BASE, 8
+ | sd CRET1, 0(CARG3)
+ | b <2
+ |. daddiu CARG3, CARG3, 8
+ |3:
+ | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |. move L:RA, L:CARG1
+ | // Returns thread status.
+ |4:
+ | ld TMP2, L:RA->base
+ | sltiu AT, CRET1, LUA_YIELD+1
+ | ld TMP3, L:RA->top
+ | li_vmstate INTERP
+ | ld BASE, L->base
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | st_vmstate
+ | beqz AT, >8
+ |. dsubu RD, TMP3, TMP2
+ | ld TMP0, L->maxstack
+ | beqz RD, >6 // No results?
+ |. daddu TMP1, BASE, RD
+ | sltu AT, TMP0, TMP1
+ | bnez AT, >9 // Need to grow stack?
+ |. daddu TMP3, TMP2, RD
+ | sd TMP2, L:RA->top // Clear coroutine stack.
+ | move TMP1, BASE
+ |5: // Move results from coroutine.
+ | ld CRET1, 0(TMP2)
+ | daddiu TMP2, TMP2, 8
+ | sltu AT, TMP2, TMP3
+ | sd CRET1, 0(TMP1)
+ | bnez AT, <5
+ |. daddiu TMP1, TMP1, 8
+ |6:
+ | andi TMP0, PC, FRAME_TYPE
+ |.if resume
+ | mov_true TMP1
+ | daddiu RA, BASE, -8
+ | sd TMP1, -8(BASE) // Prepend true to results.
+ | daddiu RD, RD, 16
+ |.else
+ | move RA, BASE
+ | daddiu RD, RD, 8
+ |.endif
+ |7:
+ | sd PC, SAVE_PC
+ | beqz TMP0, ->BC_RET_Z
+ |. move MULTRES, RD
+ | b ->vm_return
+ |. nop
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | daddiu TMP3, TMP3, -8
+ | mov_false TMP1
+ | ld CRET1, 0(TMP3)
+ | sd TMP3, L:RA->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | sd TMP1, -8(BASE) // Prepend false to results.
+ | daddiu RA, BASE, -8
+ | sd CRET1, 0(BASE) // Copy error message.
+ | b <7
+ |. andi TMP0, PC, FRAME_TYPE
+ |.else
+ | load_got lj_ffh_coroutine_wrap_err
+ | move CARG2, L:RA
+ | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |. move CARG1, L
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | load_got lj_state_growstack
+ | srl CARG2, RD, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | b <4
+ |. li CRET1, 0
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | ld TMP0, L->cframe
+ | daddu TMP1, BASE, NARGS8:RC
+ | sd BASE, L->base
+ | andi TMP0, TMP0, CFRAME_RESUME
+ | sd TMP1, L->top
+ | beqz TMP0, ->fff_fallback
+ |. li CRET1, LUA_YIELD
+ | sd r0, L->cframe
+ | b ->vm_leave_unw
+ |. sb CRET1, L->status
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_1 math_abs
+ | gettp CARG2, CARG1
+ | daddiu AT, CARG2, -LJ_TISNUM
+ | bnez AT, >1
+ |. sextw TMP1, CARG1
+ | sra TMP0, TMP1, 31 // Extract sign.
+ | xor TMP1, TMP1, TMP0
+ | dsubu CARG1, TMP1, TMP0
+ | dsll TMP3, CARG1, 32
+ | bgez TMP3, ->fff_restv
+ |. settp CARG1, TISNUM
+ | li CARG1, 0x41e0 // 2^31 as a double.
+ | b ->fff_restv
+ |. dsll CARG1, CARG1, 48
+ |1:
+ | sltiu AT, CARG2, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. dextm CARG1, CARG1, 0, 30
+ |// fallthrough
+ |
+ |->fff_restv:
+ | // CARG1 = TValue result.
+ | ld PC, FRAME_PC(BASE)
+ | daddiu RA, BASE, -16
+ | sd CARG1, -16(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->vm_return
+ |. move MULTRES, RD
+ | lw INS, -4(PC)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6 // More results expected?
+ |. decode_RA8a TMP0, INS
+ | decode_RA8b TMP0
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | dsubu BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | daddu TMP1, RA, RD
+ | daddiu RD, RD, 8
+ | b <5
+ |. sd TISNIL, -8(TMP1)
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ |. load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |// TODO: Return integer type if result is integer (own sf implementation).
+ |.macro math_round, func
+ |->ff_math_ .. func:
+ | ld CARG1, 0(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. gettp TMP0, CARG1
+ | beq TMP0, TISNUM, ->fff_restv
+ |. sltu AT, TMP0, TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ | bal ->vm_ .. func
+ |. nop
+ |.else
+ |. load_got func
+ | call_extern
+ |. nop
+ |.endif
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc math_log
+ | li AT, 8
+ | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
+ |. ld CARG1, 0(BASE)
+ | checknum CARG1, ->fff_fallback
+ |. load_got log
+ |.if FPU
+ | call_extern
+ |. ldc1 FARG1, 0(BASE)
+ |.else
+ | call_extern
+ |. nop
+ |.endif
+ | b ->fff_resn
+ |. nop
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.if FPU
+ |.ffunc_n math_sqrt
+ |. sqrt.d FRET1, FARG1
+ |// fallthrough to ->fff_resn
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |->fff_resn:
+ | ld PC, FRAME_PC(BASE)
+ | daddiu RA, BASE, -16
+ | b ->fff_res1
+ |.if FPU
+ |. sdc1 FRET1, 0(RA)
+ |.else
+ |. sd CRET1, 0(RA)
+ |.endif
+ |
+ |
+ |.ffunc_2 math_ldexp
+ | checknum CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ |. load_got ldexp
+ | .FPU ldc1 FARG1, 0(BASE)
+ | call_extern
+ |. lw CARG2, 8+LO(BASE)
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_n math_frexp
+ | load_got frexp
+ | ld PC, FRAME_PC(BASE)
+ | call_extern
+ |. daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | daddiu RA, BASE, -16
+ |.if FPU
+ | mtc1 TMP1, FARG2
+ | sdc1 FRET1, 0(RA)
+ | cvt.d.w FARG2, FARG2
+ | sdc1 FARG2, 8(RA)
+ |.else
+ | sd CRET1, 0(RA)
+ | zextw TMP1, TMP1
+ | settp TMP1, TISNUM
+ | sd TMP1, 8(RA)
+ |.endif
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.ffunc_n math_modf
+ | load_got modf
+ | ld PC, FRAME_PC(BASE)
+ | call_extern
+ |. daddiu CARG2, BASE, -16
+ | daddiu RA, BASE, -16
+ |.if FPU
+ | sdc1 FRET1, -8(BASE)
+ |.else
+ | sd CRET1, -8(BASE)
+ |.endif
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.macro math_minmax, name, intins, intinsc, fpins
+ | .ffunc_1 name
+ | daddu TMP3, BASE, NARGS8:RC
+ | checkint CARG1, >5
+ |. daddiu TMP2, BASE, 8
+ |1: // Handle integers.
+ | beq TMP2, TMP3, ->fff_restv
+ |. ld CARG2, 0(TMP2)
+ | checkint CARG2, >3
+ |. sextw CARG1, CARG1
+ | lw CARG2, LO(TMP2)
+ |. slt AT, CARG1, CARG2
+ |.if MIPSR6
+ | intins TMP1, CARG2, AT
+ | intinsc CARG1, CARG1, AT
+ | or CARG1, CARG1, TMP1
+ |.else
+ | intins CARG1, CARG2, AT
+ |.endif
+ | daddiu TMP2, TMP2, 8
+ | zextw CARG1, CARG1
+ | b <1
+ |. settp CARG1, TISNUM
+ |
+ |3: // Convert intermediate result to number and continue with number loop.
+ | checknum CARG2, ->fff_fallback
+ |.if FPU
+ |. mtc1 CARG1, FRET1
+ | cvt.d.w FRET1, FRET1
+ | b >7
+ |. ldc1 FARG1, 0(TMP2)
+ |.else
+ |. nop
+ | bal ->vm_sfi2d_1
+ |. nop
+ | b >7
+ |. nop
+ |.endif
+ |
+ |5:
+ | .FPU ldc1 FRET1, 0(BASE)
+ | checknum CARG1, ->fff_fallback
+ |6: // Handle numbers.
+ |. ld CARG2, 0(TMP2)
+ | beq TMP2, TMP3, ->fff_resn
+ |.if FPU
+ | ldc1 FARG1, 0(TMP2)
+ |.else
+ | move CRET1, CARG1
+ |.endif
+ | checknum CARG2, >8
+ |. nop
+ |7:
+ |.if FPU
+ |.if MIPSR6
+ | fpins FRET1, FRET1, FARG1
+ |.else
+ |.if fpins // ismax
+ | c.olt.d FARG1, FRET1
+ |.else
+ | c.olt.d FRET1, FARG1
+ |.endif
+ | movf.d FRET1, FARG1
+ |.endif
+ |.else
+ |.if fpins // ismax
+ | bal ->vm_sfcmpogt
+ |.else
+ | bal ->vm_sfcmpolt
+ |.endif
+ |. nop
+ |.if MIPSR6
+ | seleqz AT, CARG2, CRET1
+ | selnez CARG1, CARG1, CRET1
+ | or CARG1, CARG1, AT
+ |.else
+ | movz CARG1, CARG2, CRET1
+ |.endif
+ |.endif
+ | b <6
+ |. daddiu TMP2, TMP2, 8
+ |
+ |8: // Convert integer to number and continue with number loop.
+ | checkint CARG2, ->fff_fallback
+ |.if FPU
+ |. lwc1 FARG1, LO(TMP2)
+ | b <7
+ |. cvt.d.w FARG1, FARG1
+ |.else
+ |. lw CARG2, LO(TMP2)
+ | bal ->vm_sfi2d_2
+ |. nop
+ | b <7
+ |. nop
+ |.endif
+ |
+ |.endmacro
+ |
+ |.if MIPSR6
+ | math_minmax math_min, seleqz, selnez, min.d
+ | math_minmax math_max, selnez, seleqz, max.d
+ |.else
+ | math_minmax math_min, movz, _, 0
+ | math_minmax math_max, movn, _, 1
+ |.endif
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ld CARG1, 0(BASE)
+ | gettp TMP0, CARG1
+ | xori AT, NARGS8:RC, 8
+ | daddiu TMP0, TMP0, -LJ_TSTR
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback // Need exactly 1 string argument.
+ |. cleartp STR:CARG1
+ | lw TMP0, STR:CARG1->len
+ | daddiu RA, BASE, -16
+ | ld PC, FRAME_PC(BASE)
+ | sltu RD, r0, TMP0
+ | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addiu RD, RD, 1
+ | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
+ | settp TMP1, TISNUM
+ | b ->fff_res
+ |. sd TMP1, 0(RA)
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ |.if not MIPSR6
+ |. nop
+ |.endif
+ | ld CARG1, 0(BASE)
+ | gettp TMP0, CARG1
+ | xori AT, NARGS8:RC, 8 // Exactly 1 argument.
+ | daddiu TMP0, TMP0, -LJ_TISNUM // Integer.
+ | li TMP1, 255
+ | sextw CARG1, CARG1
+ | or AT, AT, TMP0
+ | sltu TMP1, TMP1, CARG1 // !(255 < n).
+ | or AT, AT, TMP1
+ | bnez AT, ->fff_fallback
+ |. li CARG3, 1
+ | daddiu CARG2, sp, TMPD_OFS
+ | sb CARG1, TMPD
+ |->fff_newstr:
+ | load_got lj_str_new
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
+ |. move CARG1, L
+ | // Returns GCstr *.
+ | ld BASE, L->base
+ |->fff_resstr:
+ | li AT, LJ_TSTR
+ | settp CRET1, AT
+ | b ->fff_restv
+ |. move CARG1, CRET1
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ |.if not MIPSR6
+ |. nop
+ |.endif
+ | addiu AT, NARGS8:RC, -16
+ | ld TMP0, 0(BASE)
+ | bltz AT, ->fff_fallback
+ |. gettp TMP3, TMP0
+ | cleartp STR:CARG1, TMP0
+ | ld CARG2, 8(BASE)
+ | beqz AT, >1
+ |. li CARG4, -1
+ | ld CARG3, 16(BASE)
+ | checkint CARG3, ->fff_fallback
+ |. sextw CARG4, CARG3
+ |1:
+ | checkint CARG2, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne TMP3, AT, ->fff_fallback
+ |. sextw CARG3, CARG2
+ | lw CARG2, STR:CARG1->len
+ | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
+ | slt AT, CARG4, r0
+ | addiu TMP0, CARG2, 1
+ | addu TMP1, CARG4, TMP0
+ | slt TMP3, CARG3, r0
+ |.if MIPSR6
+ | seleqz CARG4, CARG4, AT
+ | selnez TMP1, TMP1, AT
+ | or CARG4, TMP1, CARG4 // if (end < 0) end += len+1
+ |.else
+ | movn CARG4, TMP1, AT // if (end < 0) end += len+1
+ |.endif
+ | addu TMP1, CARG3, TMP0
+ |.if MIPSR6
+ | selnez TMP1, TMP1, TMP3
+ | seleqz CARG3, CARG3, TMP3
+ | or CARG3, TMP1, CARG3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | seleqz CARG4, CARG4, AT // if (end < 0) end = 0
+ | selnez CARG3, CARG3, TMP3
+ | seleqz TMP2, TMP2, TMP3
+ | or CARG3, TMP2, CARG3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | seleqz CARG4, CARG4, AT
+ | selnez CARG2, CARG2, AT
+ | or CARG4, CARG2, CARG4 // if (end > len) end = len
+ |.else
+ | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | movn CARG4, r0, AT // if (end < 0) end = 0
+ | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | movn CARG4, CARG2, AT // if (end > len) end = len
+ |.endif
+ | daddu CARG2, STR:CARG1, CARG3
+ | subu CARG3, CARG4, CARG3 // len = end - start
+ | daddiu CARG2, CARG2, sizeof(GCstr)-1
+ | bgez CARG3, ->fff_newstr
+ |. addiu CARG3, CARG3, 1 // len++
+ |->fff_emptystr: // Return empty string.
+ | li AT, LJ_TSTR
+ | daddiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty)
+ | b ->fff_restv
+ |. settp CARG1, AT
+ |
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ |. nop
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ld CARG2, 0(BASE)
+ | checkstr STR:CARG2, ->fff_fallback
+ | daddiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf)
+ | load_got lj_buf_putstr_ .. name
+ | ld TMP0, SBUF:CARG1->b
+ | sd L, SBUF:CARG1->L
+ | sd BASE, L->base
+ | sd TMP0, SBUF:CARG1->w
+ | call_intern extern lj_buf_putstr_ .. name
+ |. sd PC, SAVE_PC
+ | load_got lj_buf_tostr
+ | call_intern lj_buf_tostr
+ |. move SBUF:CARG1, SBUF:CRET1
+ | b ->fff_resstr
+ |. ld BASE, L->base
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |->vm_tobit_fb:
+ | beqz TMP1, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ | add.d FARG1, FARG1, TOBIT
+ | mfc1 CRET1, FARG1
+ | jr ra
+ |. zextw CRET1, CRET1
+ |.else
+ |// FP number to bit conversion for soft-float.
+ |->vm_tobit:
+ | dsll TMP0, CARG1, 1
+ | li CARG3, 1076
+ | dsrl AT, TMP0, 53
+ | dsubu CARG3, CARG3, AT
+ | sltiu AT, CARG3, 54
+ | beqz AT, >1
+ |. dextm TMP0, TMP0, 0, 20
+ | dinsu TMP0, AT, 21, 21
+ | slt AT, CARG1, r0
+ | dsrlv CRET1, TMP0, CARG3
+ | dsubu TMP0, r0, CRET1
+ |.if MIPSR6
+ | selnez TMP0, TMP0, AT
+ | seleqz CRET1, CRET1, AT
+ | or CRET1, CRET1, TMP0
+ |.else
+ | movn CRET1, TMP0, AT
+ |.endif
+ | jr ra
+ |. zextw CRET1, CRET1
+ |1:
+ | jr ra
+ |. move CRET1, r0
+ |
+ |// FP number to int conversion with a check for soft-float.
+ |// Modifies CARG1, CRET1, CRET2, TMP0, AT.
+ |->vm_tointg:
+ |.if JIT
+ | dsll CRET2, CARG1, 1
+ | beqz CRET2, >2
+ |. li TMP0, 1076
+ | dsrl AT, CRET2, 53
+ | dsubu TMP0, TMP0, AT
+ | sltiu AT, TMP0, 54
+ | beqz AT, >1
+ |. dextm CRET2, CRET2, 0, 20
+ | dinsu CRET2, AT, 21, 21
+ | slt AT, CARG1, r0
+ | dsrlv CRET1, CRET2, TMP0
+ | dsubu CARG1, r0, CRET1
+ |.if MIPSR6
+ | seleqz CRET1, CRET1, AT
+ | selnez CARG1, CARG1, AT
+ | or CRET1, CRET1, CARG1
+ |.else
+ | movn CRET1, CARG1, AT
+ |.endif
+ | li CARG1, 64
+ | subu TMP0, CARG1, TMP0
+ | dsllv CRET2, CRET2, TMP0 // Integer check.
+ | sextw AT, CRET1
+ | xor AT, CRET1, AT // Range check.
+ |.if MIPSR6
+ | seleqz AT, AT, CRET2
+ | selnez CRET2, CRET2, CRET2
+ | jr ra
+ |. or CRET2, AT, CRET2
+ |.else
+ | jr ra
+ |. movz CRET2, AT, CRET2
+ |.endif
+ |1:
+ | jr ra
+ |. li CRET2, 1
+ |2:
+ | jr ra
+ |. move CRET1, r0
+ |.endif
+ |.endif
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | gettp TMP0, CARG1
+ | beq TMP0, TISNUM, >6
+ |. zextw CRET1, CARG1
+ | bal ->vm_tobit_fb
+ |. sltiu TMP1, TMP0, LJ_TISNUM
+ |6:
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, bins
+ | .ffunc_bit name
+ | daddiu TMP2, BASE, 8
+ | daddu TMP3, BASE, NARGS8:RC
+ |1:
+ | beq TMP2, TMP3, ->fff_resi
+ |. ld CARG1, 0(TMP2)
+ | gettp TMP0, CARG1
+ |.if FPU
+ | bne TMP0, TISNUM, >2
+ |. daddiu TMP2, TMP2, 8
+ | zextw CARG1, CARG1
+ | b <1
+ |. bins CRET1, CRET1, CARG1
+ |2:
+ | ldc1 FARG1, -8(TMP2)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. add.d FARG1, FARG1, TOBIT
+ | mfc1 CARG1, FARG1
+ | zextw CARG1, CARG1
+ | b <1
+ |. bins CRET1, CRET1, CARG1
+ |.else
+ | beq TMP0, TISNUM, >2
+ |. move CRET2, CRET1
+ | bal ->vm_tobit_fb
+ |. sltiu TMP1, TMP0, LJ_TISNUM
+ | move CARG1, CRET2
+ |2:
+ | zextw CARG1, CARG1
+ | bins CRET1, CRET1, CARG1
+ | b <1
+ |. daddiu TMP2, TMP2, 8
+ |.endif
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | dsrl TMP0, CRET1, 8
+ | dsrl TMP1, CRET1, 24
+ | andi TMP2, TMP0, 0xff00
+ | dins TMP1, CRET1, 24, 31
+ | dins TMP2, TMP0, 16, 23
+ | b ->fff_resi
+ |. or CRET1, TMP1, TMP2
+ |
+ |.ffunc_bit bnot
+ | not CRET1, CRET1
+ | b ->fff_resi
+ |. zextw CRET1, CRET1
+ |
+ |.macro .ffunc_bit_sh, name, shins, shmod
+ | .ffunc_2 bit_..name
+ | gettp TMP0, CARG1
+ | beq TMP0, TISNUM, >1
+ |. nop
+ | bal ->vm_tobit_fb
+ |. sltiu TMP1, TMP0, LJ_TISNUM
+ | move CARG1, CRET1
+ |1:
+ | gettp TMP0, CARG2
+ | bne TMP0, TISNUM, ->fff_fallback
+ |. zextw CARG2, CARG2
+ | sextw CARG1, CARG1
+ |.if shmod == 1
+ | negu CARG2, CARG2
+ |.endif
+ | shins CRET1, CARG1, CARG2
+ | b ->fff_resi
+ |. zextw CRET1, CRET1
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, sllv, 0
+ |.ffunc_bit_sh rshift, srlv, 0
+ |.ffunc_bit_sh arshift, srav, 0
+ |.ffunc_bit_sh rol, rotrv, 1
+ |.ffunc_bit_sh ror, rotrv, 0
+ |
+ |.ffunc_bit tobit
+ |->fff_resi:
+ | ld PC, FRAME_PC(BASE)
+ | daddiu RA, BASE, -16
+ | settp CRET1, TISNUM
+ | b ->fff_res1
+ |. sd CRET1, -16(BASE)
+ |
+ |//-----------------------------------------------------------------------
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | ld TMP3, CFUNC:RB->f
+ | daddu TMP1, BASE, NARGS8:RC
+ | ld PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | daddiu TMP0, TMP1, 8*LUA_MINSTACK
+ | ld TMP2, L->maxstack
+ | sd PC, SAVE_PC // Redundant (but a defined value).
+ | sltu AT, TMP2, TMP0
+ | sd BASE, L->base
+ | sd TMP1, L->top
+ | bnez AT, >5 // Need to grow stack.
+ |. move CFUNCADDR, TMP3
+ | jalr TMP3 // (lua_State *L)
+ |. move CARG1, L
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ld BASE, L->base
+ | sll RD, CRET1, 3
+ | bgtz CRET1, ->fff_res // Returned nresults+1?
+ |. daddiu RA, BASE, -16
+ |1: // Returned 0 or -1: retry fast path.
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | ld TMP0, L->top
+ | cleartp LFUNC:RB
+ | bnez CRET1, ->vm_call_tail // Returned -1?
+ |. dsubu NARGS8:RC, TMP0, BASE
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi TMP0, PC, FRAME_TYPE
+ | li AT, -4
+ | bnez TMP0, >3
+ |. and TMP1, PC, AT
+ | lbu TMP1, OFS_RA(PC)
+ | sll TMP1, TMP1, 3
+ | addiu TMP1, TMP1, 16
+ |3:
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |. dsubu TMP2, BASE, TMP1
+ |
+ |5: // Grow stack for fallback handler.
+ | load_got lj_state_growstack
+ | li CARG2, LUA_MINSTACK
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | ld BASE, L->base
+ | b <1
+ |. li CRET1, 0 // Force retry.
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | move MULTRES, ra
+ | load_got lj_gc_step
+ | sd BASE, L->base
+ | daddu TMP0, BASE, NARGS8:RC
+ | sd PC, SAVE_PC // Redundant (but a defined value).
+ | sd TMP0, L->top
+ | call_intern lj_gc_step // (lua_State *L)
+ |. move CARG1, L
+ | ld BASE, L->base
+ | move ra, MULTRES
+ | ld TMP0, L->top
+ | ld CFUNC:RB, FRAME_FUNC(BASE)
+ | cleartp CFUNC:RB
+ | jr ra
+ |. dsubu NARGS8:RC, TMP0, BASE
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bnez AT, >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE
+ | bnez AT, >1
+ |. addiu TMP2, TMP2, -1
+ | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, >1
+ |. nop
+ | b >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | beqz AT, >1
+ |5: // Re-dispatch to static ins.
+ |. ld AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*8.
+ | jr AT
+ |. nop
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | bnez AT, <5
+ |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, <5
+ |. addiu TMP2, TMP2, -1
+ | beqz TMP2, >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, LUA_MASKLINE
+ | beqz AT, <5
+ |1:
+ |. load_got lj_dispatch_ins
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sd BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ |3:
+ | ld BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lw INS, -4(PC)
+ | decode_OP8a TMP1, INS
+ | decode_OP8b TMP1
+ | daddu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | ld AT, GG_DISP2STATIC(TMP0)
+ | decode_RA8a RA, INS
+ | decode_RD8b RD
+ | jr AT
+ | decode_RA8b RA
+ |
+ |->cont_hook: // Continue from hook yield.
+ | daddiu PC, PC, 4
+ | b <4
+ |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | ld LFUNC:TMP1, FRAME_FUNC(BASE)
+ | daddiu CARG1, DISPATCH, GG_DISP2J
+ | cleartp LFUNC:TMP1
+ | sd PC, SAVE_PC
+ | ld TMP1, LFUNC:TMP1->pc
+ | move CARG2, PC
+ | sd L, DISPATCH_J(L)(DISPATCH)
+ | lbu TMP1, PC2PROTO(framesize)(TMP1)
+ | load_got lj_trace_hot
+ | sd BASE, L->base
+ | dsll TMP1, TMP1, 3
+ | daddu TMP1, BASE, TMP1
+ | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ |. sd TMP1, L->top
+ | b <3
+ |. nop
+ |.endif
+ |
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ |.if JIT
+ | b >1
+ |.endif
+ |. move CARG2, PC
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | load_got lj_dispatch_call
+ | daddu TMP0, BASE, RC
+ | sd PC, SAVE_PC
+ | sd BASE, L->base
+ | dsubu RA, RA, BASE
+ | sd TMP0, L->top
+ | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // Returns ASMFunction.
+ | ld BASE, L->base
+ | ld TMP0, L->top
+ | sd r0, SAVE_PC // Invalidate for subsequent line hook.
+ | dsubu NARGS8:RC, TMP0, BASE
+ | daddu RA, BASE, RA
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | cleartp LFUNC:RB
+ | jr CRET1
+ |. lw INS, -4(PC)
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, RB = meta base
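+ | // Copy the results down to the call base, fill any missing slots with nil,
+ | // then dispatch to the linked trace, fall back to the interpreter if
+ | // blacklisted, or call lj_dispatch_stitch to record a new trace.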
+ | lw INS, -4(PC)
+ | ld TRACE:TMP2, -40(RB) // Save previous trace.
+ | decode_RA8a RC, INS
+ | daddiu AT, MULTRES, -8
+ | cleartp TRACE:TMP2
+ | decode_RA8b RC
+ | beqz AT, >2
+ |. daddu RC, BASE, RC // Call base.
+ |1: // Move results down.
+ | ld CARG1, 0(RA)
+ | daddiu AT, AT, -8
+ | daddiu RA, RA, 8
+ | sd CARG1, 0(RC)
+ | bnez AT, <1
+ |. daddiu RC, RC, 8
+ |2:
+ | decode_RA8a RA, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b RA
+ | decode_RB8b RB
+ | daddu RA, RA, RB
+ | daddu RA, BASE, RA
+ |3:
+ | sltu AT, RC, RA
+ | bnez AT, >9 // More results wanted?
+ |. nop
+ |
+ | lhu TMP3, TRACE:TMP2->traceno
+ | lhu RD, TRACE:TMP2->link
+ | beq RD, TMP3, ->cont_nop // Blacklisted.
+ |. load_got lj_dispatch_stitch
+ | bnez RD, =>BC_JLOOP // Jump to stitched trace.
+ |. sll RD, RD, 3
+ |
+ | // Stitch a new trace to the previous trace.
+ | sw TMP3, DISPATCH_J(exitno)(DISPATCH)
+ | sd L, DISPATCH_J(L)(DISPATCH)
+ | sd BASE, L->base
+ | daddiu CARG1, DISPATCH, GG_DISP2J
+ | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ |. move CARG2, PC
+ | b ->cont_nop
+ |. ld BASE, L->base
+ |
+ |9:
+ | sd TISNIL, 0(RC)
+ | b <3
+ |. daddiu RC, RC, 8
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | load_got lj_dispatch_profile
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sd BASE, L->base
+ | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | daddiu PC, PC, -4
+ | b ->cont_nop
+ |. ld BASE, L->base
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b
+ |.if FPU
+ | sdc1 f..a, a*8(sp)
+ | sdc1 f..b, b*8(sp)
+ | sd r..a, 32*8+a*8(sp)
+ | sd r..b, 32*8+b*8(sp)
+ |.else
+ | sd r..a, a*8(sp)
+ | sd r..b, b*8(sp)
+ |.endif
+ |.endmacro
+ |
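+ |// All GPRs (and FPRs on hard-float builds) are spilled below the original sp;
+ |// the resulting block is the ExitState passed to lj_trace_exit.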
+ |->vm_exit_handler:
+ |.if JIT
+ |.if FPU
+ | daddiu sp, sp, -(32*8+32*8)
+ |.else
+ | daddiu sp, sp, -(32*8)
+ |.endif
+ | savex_ 0, 1
+ | savex_ 2, 3
+ | savex_ 4, 5
+ | savex_ 6, 7
+ | savex_ 8, 9
+ | savex_ 10, 11
+ | savex_ 12, 13
+ | savex_ 14, 15
+ | savex_ 16, 17
+ | savex_ 18, 19
+ | savex_ 20, 21
+ | savex_ 22, 23
+ | savex_ 24, 25
+ | savex_ 26, 27
+ | savex_ 28, 30
+ |.if FPU
+ | sdc1 f29, 29*8(sp)
+ | sdc1 f31, 31*8(sp)
+ | sd r0, 32*8+31*8(sp) // Clear RID_TMP.
+ | daddiu TMP2, sp, 32*8+32*8 // Recompute original value of sp.
+ | sd TMP2, 32*8+29*8(sp) // Store sp in RID_SP
+ |.else
+ | sd r0, 31*8(sp) // Clear RID_TMP.
+ | daddiu TMP2, sp, 32*8 // Recompute original value of sp.
+ | sd TMP2, 29*8(sp) // Store sp in RID_SP
+ |.endif
+ | li_vmstate EXIT
+ | daddiu DISPATCH, JGL, -GG_DISP2G-32768
+ | lw TMP1, 0(TMP2) // Load exit number.
+ | st_vmstate
+ | ld L, DISPATCH_GL(cur_L)(DISPATCH)
+ | ld BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | load_got lj_trace_exit
+ | sd L, DISPATCH_J(L)(DISPATCH)
+ | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
+ | sd BASE, L->base
+ | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
+ | daddiu CARG1, DISPATCH, GG_DISP2J
+ | sd r0, DISPATCH_GL(jit_base)(DISPATCH)
+ | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
+ |. move CARG2, sp
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ld TMP1, L->cframe
+ | li AT, -4
+ | ld BASE, L->base
+ | and sp, TMP1, AT
+ | ld PC, SAVE_PC // Get SAVE_PC.
+ | b >1
+ |. sd L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | ld L, SAVE_L
+ | daddiu DISPATCH, JGL, -GG_DISP2G-32768
+ | sd BASE, L->base
+ |1:
+ | sltiu TMP0, CRET1, -LUA_ERRERR // Check for error from exit.
+ | beqz TMP0, >9
+ |. ld LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | dsll MULTRES, CRET1, 3
+ | cleartp LFUNC:RB
+ | sw MULTRES, SAVE_MULTRES
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | .FPU mtc1 TMP3, TOBIT
+ | ld TMP1, LFUNC:RB->pc
+ | sd r0, DISPATCH_GL(jit_base)(DISPATCH)
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lw INS, 0(PC)
+ | addiu CRET1, CRET1, 17 // Static dispatch?
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
+ | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_RD8a RD, INS
+ | beqz CRET1, >5
+ |. daddiu PC, PC, 4
+ | decode_OP8a TMP1, INS
+ | decode_OP8b TMP1
+ | daddu TMP0, DISPATCH, TMP1
+ | sltiu TMP2, TMP1, BC_FUNCF*8
+ | ld AT, 0(TMP0)
+ | decode_RA8a RA, INS
+ | beqz TMP2, >2
+ |. decode_RA8b RA
+ | jr AT
+ |. decode_RD8b RD
+ |2:
+ | sltiu TMP2, TMP1, (BC_FUNCC+2)*8 // Fast function?
+ | bnez TMP2, >3
+ |. ld TMP1, FRAME_PC(BASE)
+ | // Check frame below fast function.
+ | andi TMP0, TMP1, FRAME_TYPE
+ | bnez TMP0, >3 // Trace stitching continuation?
+ |. nop
+ | // Otherwise set KBASE for Lua function below fast function.
+ | lw TMP2, -4(TMP1)
+ | decode_RA8a TMP0, TMP2
+ | decode_RA8b TMP0
+ | dsubu TMP1, BASE, TMP0
+ | ld LFUNC:TMP2, -32(TMP1)
+ | cleartp LFUNC:TMP2
+ | ld TMP1, LFUNC:TMP2->pc
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ |3:
+ | daddiu RC, MULTRES, -8
+ | jr AT
+ |. daddu RA, RA, BASE
+ |
+ |5: // Dispatch to static entry of original ins replaced by BC_JLOOP.
+ | ld TMP0, DISPATCH_J(trace)(DISPATCH)
+ | decode_RD8b RD
+ | daddu TMP0, TMP0, RD
+ | ld TRACE:TMP2, 0(TMP0)
+ | lw INS, TRACE:TMP2->startins
+ | decode_OP8a TMP1, INS
+ | decode_OP8b TMP1
+ | daddu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | ld AT, GG_DISP2STATIC(TMP0)
+ | decode_RA8a RA, INS
+ | decode_RD8b RD
+ | jr AT
+ |. decode_RA8b RA
+ |
+ |9: // Rethrow error from the right C frame.
+ | load_got lj_err_trace
+ | sub CARG2, r0, CRET1
+ | call_intern lj_err_trace // (lua_State *L, int errcode)
+ |. move CARG1, L
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Hard-float round to integer.
+ |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
+ |// MIPSR6: Modifies FTMP1, too.
+ |.macro vm_round_hf, func
+ | lui TMP0, 0x4330 // Hiword of 2^52 (double).
+ | dsll TMP0, TMP0, 32
+ | dmtc1 TMP0, f4
+ | abs.d FRET2, FARG1 // |x|
+ | dmfc1 AT, FARG1
+ |.if MIPSR6
+ | cmp.lt.d FTMP1, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1eqz FTMP1, >1 // Truncate only if |x| < 2^52.
+ |.else
+ | c.olt.d 0, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1f 0, >1 // Truncate only if |x| < 2^52.
+ |.endif
+ |. sub.d FRET1, FRET1, f4
+ | slt AT, AT, r0
+ |.if "func" == "ceil"
+ | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
+ |.else
+ | lui TMP0, 0x3ff0 // Hiword of +1 (double).
+ |.endif
+ |.if "func" == "trunc"
+ | dsll TMP0, TMP0, 32
+ | dmtc1 TMP0, f4
+ |.if MIPSR6
+ | cmp.lt.d FTMP1, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | sel.d FTMP1, FRET1, FRET2 // If yes, subtract +1.
+ | dmtc1 AT, FRET1
+ | neg.d FRET2, FTMP1
+ | jr ra
+ |. sel.d FRET1, FTMP1, FRET2 // Merge sign bit back in.
+ |.else
+ | c.olt.d 0, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
+ | neg.d FRET2, FRET1
+ | jr ra
+ |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.endif
+ |.else
+ | neg.d FRET2, FRET1
+ | dsll TMP0, TMP0, 32
+ | dmtc1 TMP0, f4
+ |.if MIPSR6
+ | dmtc1 AT, FTMP1
+ | sel.d FTMP1, FRET1, FRET2
+ |.if "func" == "ceil"
+ | cmp.lt.d FRET1, FTMP1, FARG1 // x > result?
+ |.else
+ | cmp.lt.d FRET1, FARG1, FTMP1 // x < result?
+ |.endif
+ | sub.d FRET2, FTMP1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. sel.d FRET1, FTMP1, FRET2
+ |.else
+ | movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.if "func" == "ceil"
+ | c.olt.d 0, FRET1, FARG1 // x > result?
+ |.else
+ | c.olt.d 0, FARG1, FRET1 // x < result?
+ |.endif
+ | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. movt.d FRET1, FRET2, 0
+ |.endif
+ |.endif
+ |1:
+ | jr ra
+ |. mov.d FRET1, FARG1
+ |.endmacro
+ |
+ |.macro vm_round, func
+ |.if FPU
+ | vm_round_hf, func
+ |.endif
+ |.endmacro
+ |
+ |->vm_floor:
+ | vm_round floor
+ |->vm_ceil:
+ | vm_round ceil
+ |->vm_trunc:
+ |.if JIT
+ | vm_round trunc
+ |.endif
+ |
+ |// Soft-float integer to number conversion.
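+ |// The double is assembled by hand: sign from the input, exponent from the
+ |// leading-zero count, and the left-aligned mantissa added last so that its
+ |// implicit bit carries into the exponent field.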
+ |.macro sfi2d, ARG
+ |.if not FPU
+ | beqz ARG, >9 // Handle zero first.
+ |. sra TMP0, ARG, 31
+ | xor TMP1, ARG, TMP0
+ | dsubu TMP1, TMP1, TMP0 // Absolute value in TMP1.
+ | dclz ARG, TMP1
+ | addiu ARG, ARG, -11
+ | li AT, 0x3ff+63-11-1
+ | dsllv TMP1, TMP1, ARG // Align mantissa left with leading 1.
+ | subu ARG, AT, ARG // Exponent - 1.
+ | ins ARG, TMP0, 11, 11 // Sign | Exponent.
+ | dsll ARG, ARG, 52 // Align left.
+ | jr ra
+ |. daddu ARG, ARG, TMP1 // Add mantissa, increment exponent.
+ |9:
+ | jr ra
+ |. nop
+ |.endif
+ |.endmacro
+ |
+ |// Input CARG1. Output: CARG1. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfi2d_1:
+ | sfi2d CARG1
+ |
+ |// Input CARG2. Output: CARG2. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfi2d_2:
+ | sfi2d CARG2
+ |
+ |// Soft-float comparison. Equivalent to c.eq.d.
+ |// Input: CARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1.
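+ |// NaN check: x shifted left by one compares (unsigned) above the shifted
+ |// +-Inf pattern 0xffe0 << 48 only if x is a NaN.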
+ |->vm_sfcmpeq:
+ |.if not FPU
+ | dsll AT, CARG1, 1
+ | dsll TMP0, CARG2, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 1.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0;
+ |. xor AT, CARG1, CARG2
+ | jr ra
+ |. sltiu CRET1, AT, 1 // Same values: return 1.
+ |8:
+ | jr ra
+ |. li CRET1, 1
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |// Soft-float comparison. Equivalent to c.ult.d and c.olt.d.
+ |// Input: CARG1, CARG2. Output: CRET1. Temporaries: AT, TMP0, TMP1, CRET2.
+ |->vm_sfcmpult:
+ |.if not FPU
+ | b >1
+ |. li CRET2, 1
+ |.endif
+ |
+ |->vm_sfcmpolt:
+ |.if not FPU
+ | li CRET2, 0
+ |1:
+ | dsll AT, CARG1, 1
+ | dsll TMP0, CARG2, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 0.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
+ |. and AT, CARG1, CARG2
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | jr ra
+ |. slt CRET1, CARG1, CARG2
+ |5: // Swap conditions if both operands are negative.
+ | jr ra
+ |. slt CRET1, CARG2, CARG1
+ |8:
+ | jr ra
+ |. li CRET1, 0
+ |9:
+ | jr ra
+ |. move CRET1, CRET2
+ |.endif
+ |
+ |->vm_sfcmpogt:
+ |.if not FPU
+ | dsll AT, CARG2, 1
+ | dsll TMP0, CARG1, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 0.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
+ |. and AT, CARG2, CARG1
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | jr ra
+ |. slt CRET1, CARG2, CARG1
+ |5: // Swap conditions if both operands are negative.
+ | jr ra
+ |. slt CRET1, CARG1, CARG2
+ |8:
+ | jr ra
+ |. li CRET1, 0
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |// Soft-float comparison. Equivalent to c.ole.d a, b or c.ole.d b, a.
+ |// Input: CARG1, CARG2, TMP3. Output: CRET1. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfcmpolex:
+ |.if not FPU
+ | dsll AT, CARG1, 1
+ | dsll TMP0, CARG2, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 1.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0;
+ |. and AT, CARG1, CARG2
+ | xor AT, AT, TMP3
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | jr ra
+ |. slt CRET1, CARG2, CARG1
+ |5: // Swap conditions if both operands are negative.
+ | jr ra
+ |. slt CRET1, CARG1, CARG2
+ |8:
+ | jr ra
+ |. li CRET1, 1
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
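+ |// Soft-float min/max: the comparison helper sets CRET1, which then selects
+ |// between the two unmodified arguments.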
+ |.macro sfmin_max, name, fpcall
+ |->vm_sf .. name:
+ |.if JIT and not FPU
+ | move TMP2, ra
+ | bal ->fpcall
+ |. nop
+ | move ra, TMP2
+ | move TMP0, CRET1
+ | move CRET1, CARG1
+ |.if MIPSR6
+ | selnez CRET1, CRET1, TMP0
+ | seleqz TMP0, CARG2, TMP0
+ | jr ra
+ |. or CRET1, CRET1, TMP0
+ |.else
+ | jr ra
+ |. movz CRET1, CARG2, TMP0
+ |.endif
+ |.endif
+ |.endmacro
+ |
+ | sfmin_max min, vm_sfcmpolt
+ | sfmin_max max, vm_sfcmpogt
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_IDX, CARG2
+ |.define NEXT_ASIZE, CARG3
+ |.define NEXT_NIL, CARG4
+ |.define NEXT_TMP0, r12
+ |.define NEXT_TMP1, r13
+ |.define NEXT_TMP2, r14
+ |.define NEXT_RES_VK, CRET1
+ |.define NEXT_RES_IDX, CRET2
+ |.define NEXT_RES_PTR, sp
+ |.define NEXT_RES_VAL, 0(sp)
+ |.define NEXT_RES_KEY, 8(sp)
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in CRET2.
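+ |// Array-part hits and end of iteration return a pointer to two scratch slots
+ |// at sp; hash-part hits return a pointer directly into the Node.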
+ |->vm_next:
+ |.if JIT and ENDIAN_LE
+ | lw NEXT_ASIZE, NEXT_TAB->asize
+ | ld NEXT_TMP0, NEXT_TAB->array
+ | li NEXT_NIL, LJ_TNIL
+ |1: // Traverse array part.
+ | sltu AT, NEXT_IDX, NEXT_ASIZE
+ | sll NEXT_TMP1, NEXT_IDX, 3
+ | beqz AT, >5
+ |. daddu NEXT_TMP1, NEXT_TMP0, NEXT_TMP1
+ | li AT, LJ_TISNUM
+ | ld NEXT_TMP2, 0(NEXT_TMP1)
+ | dsll AT, AT, 47
+ | or NEXT_TMP1, NEXT_IDX, AT
+ | beq NEXT_TMP2, NEXT_NIL, <1
+ |. addiu NEXT_IDX, NEXT_IDX, 1
+ | sd NEXT_TMP2, NEXT_RES_VAL
+ | sd NEXT_TMP1, NEXT_RES_KEY
+ | move NEXT_RES_VK, NEXT_RES_PTR
+ | jr ra
+ |. move NEXT_RES_IDX, NEXT_IDX
+ |
+ |5: // Traverse hash part.
+ | subu NEXT_RES_IDX, NEXT_IDX, NEXT_ASIZE
+ | ld NODE:NEXT_RES_VK, NEXT_TAB->node
+ | sll NEXT_TMP2, NEXT_RES_IDX, 5
+ | lw NEXT_TMP0, NEXT_TAB->hmask
+ | sll AT, NEXT_RES_IDX, 3
+ | subu AT, NEXT_TMP2, AT
+ | daddu NODE:NEXT_RES_VK, NODE:NEXT_RES_VK, AT
+ |6:
+ | sltu AT, NEXT_TMP0, NEXT_RES_IDX
+ | bnez AT, >8
+ |. nop
+ | ld NEXT_TMP2, NODE:NEXT_RES_VK->val
+ | bne NEXT_TMP2, NEXT_NIL, >9
+ |. addiu NEXT_RES_IDX, NEXT_RES_IDX, 1
+ | // Skip holes in hash part.
+ | b <6
+ |. daddiu NODE:NEXT_RES_VK, NODE:NEXT_RES_VK, sizeof(Node)
+ |
+ |8: // End of iteration. Set the key to nil (not the value).
+ | sd NEXT_NIL, NEXT_RES_KEY
+ | move NEXT_RES_VK, NEXT_RES_PTR
+ |9:
+ | jr ra
+ |. addu NEXT_RES_IDX, NEXT_RES_IDX, NEXT_ASIZE
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r1, g in r2.
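+ |// All argument registers are spilled into the callback area of the CTState
+ |// before lj_ccallback_enter sets up the lua_State to run the callback in.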
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | ld CTSTATE, GL:r2->ctype_state
+ | daddiu DISPATCH, r2, GG_G2DISP
+ | load_got lj_ccallback_enter
+ | sw r1, CTSTATE->cb.slot
+ | sd CARG1, CTSTATE->cb.gpr[0]
+ | .FPU sdc1 FARG1, CTSTATE->cb.fpr[0]
+ | sd CARG2, CTSTATE->cb.gpr[1]
+ | .FPU sdc1 FARG2, CTSTATE->cb.fpr[1]
+ | sd CARG3, CTSTATE->cb.gpr[2]
+ | .FPU sdc1 FARG3, CTSTATE->cb.fpr[2]
+ | sd CARG4, CTSTATE->cb.gpr[3]
+ | .FPU sdc1 FARG4, CTSTATE->cb.fpr[3]
+ | sd CARG5, CTSTATE->cb.gpr[4]
+ | .FPU sdc1 FARG5, CTSTATE->cb.fpr[4]
+ | sd CARG6, CTSTATE->cb.gpr[5]
+ | .FPU sdc1 FARG6, CTSTATE->cb.fpr[5]
+ | sd CARG7, CTSTATE->cb.gpr[6]
+ | .FPU sdc1 FARG7, CTSTATE->cb.fpr[6]
+ | sd CARG8, CTSTATE->cb.gpr[7]
+ | .FPU sdc1 FARG8, CTSTATE->cb.fpr[7]
+ | daddiu TMP0, sp, CFRAME_SPACE
+ | sd TMP0, CTSTATE->cb.stack
+ | sd r0, SAVE_PC // Any value outside of bytecode is ok.
+ | move CARG2, sp
+ | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
+ |. move CARG1, CTSTATE
+ | // Returns lua_State *.
+ | ld BASE, L:CRET1->base
+ | ld RC, L:CRET1->top
+ | move L, CRET1
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU mtc1 TMP3, TOBIT
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM
+ | li_vmstate INTERP
+ | subu RC, RC, BASE
+ | cleartp LFUNC:RB
+ | st_vmstate
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | load_got lj_ccallback_leave
+ | ld CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | sd BASE, L->base
+ | sd RB, L->top
+ | sd L, CTSTATE->L
+ | move CARG2, RA
+ | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
+ |. move CARG1, CTSTATE
+ | .FPU ldc1 FRET1, CTSTATE->cb.fpr[0]
+ | ld CRET1, CTSTATE->cb.gpr[0]
+ | .FPU ldc1 FRET2, CTSTATE->cb.fpr[1]
+ | b ->vm_leave_unw
+ |. ld CRET2, CTSTATE->cb.gpr[1]
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
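+ | // Stack-passed arguments are copied from CCSTATE->stack into the newly
+ | // allocated stack area, then the register arguments are loaded from CCSTATE.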
+ |.if FFI
+ | .type CCSTATE, CCallState, CARG1
+ | lw TMP1, CCSTATE->spadj
+ | lbu CARG2, CCSTATE->nsp
+ | move TMP2, sp
+ | dsubu sp, sp, TMP1
+ | sd ra, -8(TMP2)
+ | sd r16, -16(TMP2)
+ | sd CCSTATE, -24(TMP2)
+ | move r16, TMP2
+ | daddiu TMP1, CCSTATE, offsetof(CCallState, stack)
+ | move TMP2, sp
+ | beqz CARG2, >2
+ |. daddu TMP3, TMP1, CARG2
+ |1:
+ | ld TMP0, 0(TMP1)
+ | daddiu TMP1, TMP1, 8
+ | sltu AT, TMP1, TMP3
+ | sd TMP0, 0(TMP2)
+ | bnez AT, <1
+ |. daddiu TMP2, TMP2, 8
+ |2:
+ | ld CFUNCADDR, CCSTATE->func
+ | .FPU ldc1 FARG1, CCSTATE->gpr[0]
+ | ld CARG2, CCSTATE->gpr[1]
+ | .FPU ldc1 FARG2, CCSTATE->gpr[1]
+ | ld CARG3, CCSTATE->gpr[2]
+ | .FPU ldc1 FARG3, CCSTATE->gpr[2]
+ | ld CARG4, CCSTATE->gpr[3]
+ | .FPU ldc1 FARG4, CCSTATE->gpr[3]
+ | ld CARG5, CCSTATE->gpr[4]
+ | .FPU ldc1 FARG5, CCSTATE->gpr[4]
+ | ld CARG6, CCSTATE->gpr[5]
+ | .FPU ldc1 FARG6, CCSTATE->gpr[5]
+ | ld CARG7, CCSTATE->gpr[6]
+ | .FPU ldc1 FARG7, CCSTATE->gpr[6]
+ | ld CARG8, CCSTATE->gpr[7]
+ | .FPU ldc1 FARG8, CCSTATE->gpr[7]
+ | jalr CFUNCADDR
+ |. ld CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | ld CCSTATE:TMP1, -24(r16)
+ | ld TMP2, -16(r16)
+ | ld ra, -8(r16)
+ | sd CRET1, CCSTATE:TMP1->gpr[0]
+ | sd CRET2, CCSTATE:TMP1->gpr[1]
+ |.if FPU
+ | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
+ | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
+ |.else
+ | sd CARG1, CCSTATE:TMP1->gpr[2] // 2nd FP struct field for soft-float.
+ |.endif
+ | move sp, r16
+ | jr ra
+ |. move r16, TMP2
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
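+ | // The jump target in RD is biased by BCBIAS_J; adding -BCBIAS_J*4 (built in
+ | // TMP3) to RD*4 yields the signed byte offset, which is zeroed to fall
+ | // through on a false comparison.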
+ |.macro bc_comp, FRA, FRD, ARGRA, ARGRD, movop, fmovop, fcomp, sfcomp
+ | daddu RA, BASE, RA
+ | daddu RD, BASE, RD
+ | ld ARGRA, 0(RA)
+ | ld ARGRD, 0(RD)
+ | lhu TMP2, OFS_RD(PC)
+ | gettp CARG3, ARGRA
+ | gettp CARG4, ARGRD
+ | bne CARG3, TISNUM, >2
+ |. daddiu PC, PC, 4
+ | bne CARG4, TISNUM, >5
+ |. decode_RD4b TMP2
+ | sextw ARGRA, ARGRA
+ | sextw ARGRD, ARGRD
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | slt AT, CARG1, CARG2
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ | movop TMP2, TMP2, AT
+ |.else
+ | movop TMP2, r0, AT
+ |.endif
+ |1:
+ | daddu PC, PC, TMP2
+ | ins_next
+ |
+ |2: // RA is not an integer.
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->vmeta_comp
+ |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, CARG4, LJ_TISNUM
+ | beqz AT, >4
+ |. decode_RD4b TMP2
+ |.if FPU
+ | ldc1 FRA, 0(RA)
+ | ldc1 FRD, 0(RD)
+ |.endif
+ |3: // RA and RD are both numbers.
+ |.if FPU
+ |.if MIPSR6
+ | fcomp FTMP0, FTMP0, FTMP2
+ | addu TMP2, TMP2, TMP3
+ | mfc1 TMP3, FTMP0
+ | b <1
+ |. fmovop TMP2, TMP2, TMP3
+ |.else
+ | fcomp FTMP0, FTMP2
+ | addu TMP2, TMP2, TMP3
+ | b <1
+ |. fmovop TMP2, r0
+ |.endif
+ |.else
+ | bal sfcomp
+ |. addu TMP2, TMP2, TMP3
+ | b <1
+ |.if MIPSR6
+ |. movop TMP2, TMP2, CRET1
+ |.else
+ |. movop TMP2, r0, CRET1
+ |.endif
+ |.endif
+ |
+ |4: // RA is a number, RD is not a number.
+ | bne CARG4, TISNUM, ->vmeta_comp
+ | // RA is a number, RD is an integer. Convert RD to a number.
+ |.if FPU
+ |. lwc1 FRD, LO(RD)
+ | ldc1 FRA, 0(RA)
+ | b <3
+ |. cvt.d.w FRD, FRD
+ |.else
+ |.if "ARGRD" == "CARG1"
+ |. sextw CARG1, CARG1
+ | bal ->vm_sfi2d_1
+ |. nop
+ |.else
+ |. sextw CARG2, CARG2
+ | bal ->vm_sfi2d_2
+ |. nop
+ |.endif
+ | b <3
+ |. nop
+ |.endif
+ |
+ |5: // RA is an integer, RD is not an integer
+ | sltiu AT, CARG4, LJ_TISNUM
+ | beqz AT, ->vmeta_comp
+ |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | // RA is an integer, RD is a number. Convert RA to a number.
+ |.if FPU
+ | lwc1 FRA, LO(RA)
+ | ldc1 FRD, 0(RD)
+ | b <3
+ | cvt.d.w FRA, FRA
+ |.else
+ |.if "ARGRA" == "CARG1"
+ | bal ->vm_sfi2d_1
+ |. sextw CARG1, CARG1
+ |.else
+ | bal ->vm_sfi2d_2
+ |. sextw CARG2, CARG2
+ |.endif
+ | b <3
+ |. nop
+ |.endif
+ |.endmacro
+ |
+ |.if MIPSR6
+ if (op == BC_ISLT) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, selnez, selnez, cmp.lt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISGE) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, seleqz, seleqz, cmp.lt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISLE) {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, seleqz, seleqz, cmp.ult.d, ->vm_sfcmpult
+ } else {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, selnez, selnez, cmp.ult.d, ->vm_sfcmpult
+ }
+ |.else
+ if (op == BC_ISLT) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, movz, movf, c.olt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISGE) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, movn, movt, c.olt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISLE) {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, movn, movt, c.ult.d, ->vm_sfcmpult
+ } else {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, movz, movf, c.ult.d, ->vm_sfcmpult
+ }
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | daddu RA, BASE, RA
+ | daddiu PC, PC, 4
+ | daddu RD, BASE, RD
+ | ld CARG1, 0(RA)
+ | lhu TMP2, -4+OFS_RD(PC)
+ | ld CARG2, 0(RD)
+ | gettp CARG3, CARG1
+ | gettp CARG4, CARG2
+ | sltu AT, TISNUM, CARG3
+ | sltu TMP1, TISNUM, CARG4
+ | or AT, AT, TMP1
+ if (vk) {
+ | beqz AT, ->BC_ISEQN_Z
+ } else {
+ | beqz AT, ->BC_ISNEN_Z
+ }
+ | // Either or both types are not numbers.
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ |.if FFI
+ |. li AT, LJ_TCDATA
+ | beq CARG3, AT, ->vmeta_equal_cd
+ |.endif
+ | decode_RD4b TMP2
+ |.if FFI
+ | beq CARG4, AT, ->vmeta_equal_cd
+ |. nop
+ |.endif
+ | bne CARG1, CARG2, >2
+ |. addu TMP2, TMP2, TMP3
+ | // Tag and value are equal.
+ if (vk) {
+ |->BC_ISEQV_Z:
+ | daddu PC, PC, TMP2
+ }
+ |1:
+ | ins_next
+ |
+ |2: // Check if the tags are the same and it's a table or userdata.
+ | xor AT, CARG3, CARG4 // Same type?
+ | sltiu TMP0, CARG3, LJ_TISTABUD+1 // Table or userdata?
+ |.if MIPSR6
+ | seleqz TMP0, TMP0, AT
+ |.else
+ | movn TMP0, r0, AT
+ |.endif
+ if (vk) {
+ | beqz TMP0, <1
+ } else {
+ | beqz TMP0, ->BC_ISEQV_Z // Reuse code from opposite instruction.
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ |. cleartp TAB:TMP1, CARG1
+ | ld TAB:TMP3, TAB:TMP1->metatable
+ if (vk) {
+ | beqz TAB:TMP3, <1 // No metatable?
+ |. nop
+ | lbu TMP3, TAB:TMP3->nomm
+ | andi TMP3, TMP3, 1<<MM_eq
+ | bnez TMP3, >1 // Or 'no __eq' flag set?
+ } else {
+ | beqz TAB:TMP3,->BC_ISEQV_Z // No metatable?
+ |. nop
+ | lbu TMP3, TAB:TMP3->nomm
+ | andi TMP3, TMP3, 1<<MM_eq
+ | bnez TMP3, ->BC_ISEQV_Z // Or 'no __eq' flag set?
+ }
+ |. nop
+ | b ->vmeta_equal // Handle __eq metamethod.
+ |. li TMP0, 1-vk // ne = 0 or 1.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | daddu RA, BASE, RA
+ | daddiu PC, PC, 4
+ | ld CARG1, 0(RA)
+ | dsubu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ | ld CARG2, -8(RD) // KBASE-8-str_const*8
+ |.if FFI
+ | gettp TMP0, CARG1
+ | li AT, LJ_TCDATA
+ |.endif
+ | li TMP1, LJ_TSTR
+ | decode_RD4b TMP2
+ |.if FFI
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. settp CARG2, TMP1
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | xor TMP1, CARG1, CARG2
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ if (vk) {
+ | seleqz TMP2, TMP2, TMP1
+ } else {
+ | selnez TMP2, TMP2, TMP1
+ }
+ |.else
+ if (vk) {
+ | movn TMP2, r0, TMP1
+ } else {
+ | movz TMP2, r0, TMP1
+ }
+ |.endif
+ | daddu PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | daddu RA, BASE, RA
+ | daddu RD, KBASE, RD
+ | ld CARG1, 0(RA)
+ | ld CARG2, 0(RD)
+ | lhu TMP2, OFS_RD(PC)
+ | gettp CARG3, CARG1
+ | gettp CARG4, CARG2
+ | daddiu PC, PC, 4
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | bne CARG3, TISNUM, >3
+ |. decode_RD4b TMP2
+ | bne CARG4, TISNUM, >6
+ |. addu TMP2, TMP2, TMP3
+ | xor AT, CARG1, CARG2
+ |.if MIPSR6
+ if (vk) {
+ | seleqz TMP2, TMP2, AT
+ |1:
+ | daddu PC, PC, TMP2
+ |2:
+ } else {
+ | selnez TMP2, TMP2, AT
+ |1:
+ |2:
+ | daddu PC, PC, TMP2
+ }
+ |.else
+ if (vk) {
+ | movn TMP2, r0, AT
+ |1:
+ | daddu PC, PC, TMP2
+ |2:
+ } else {
+ | movz TMP2, r0, AT
+ |1:
+ |2:
+ | daddu PC, PC, TMP2
+ }
+ |.endif
+ | ins_next
+ |
+ |3: // RA is not an integer.
+ | sltu AT, CARG3, TISNUM
+ |.if FFI
+ | beqz AT, >8
+ |.else
+ | beqz AT, <2
+ |.endif
+ |. addu TMP2, TMP2, TMP3
+ | sltu AT, CARG4, TISNUM
+ |.if FPU
+ | ldc1 FTMP0, 0(RA)
+ | ldc1 FTMP2, 0(RD)
+ |.endif
+ | beqz AT, >5
+ |. nop
+ |4: // RA and RD are both numbers.
+ |.if FPU
+ |.if MIPSR6
+ | cmp.eq.d FTMP0, FTMP0, FTMP2
+ | dmfc1 TMP1, FTMP0
+ | b <1
+ if (vk) {
+ |. selnez TMP2, TMP2, TMP1
+ } else {
+ |. seleqz TMP2, TMP2, TMP1
+ }
+ |.else
+ | c.eq.d FTMP0, FTMP2
+ | b <1
+ if (vk) {
+ |. movf TMP2, r0
+ } else {
+ |. movt TMP2, r0
+ }
+ |.endif
+ |.else
+ | bal ->vm_sfcmpeq
+ |. nop
+ | b <1
+ |.if MIPSR6
+ if (vk) {
+ |. selnez TMP2, TMP2, CRET1
+ } else {
+ |. seleqz TMP2, TMP2, CRET1
+ }
+ |.else
+ if (vk) {
+ |. movz TMP2, r0, CRET1
+ } else {
+ |. movn TMP2, r0, CRET1
+ }
+ |.endif
+ |.endif
+ |
+ |5: // RA is a number, RD is not a number.
+ |.if FFI
+ | bne CARG4, TISNUM, >9
+ |.else
+ | bne CARG4, TISNUM, <2
+ |.endif
+ | // RA is a number, RD is an integer. Convert RD to a number.
+ |.if FPU
+ |. lwc1 FTMP2, LO(RD)
+ | b <4
+ |. cvt.d.w FTMP2, FTMP2
+ |.else
+ |. sextw CARG2, CARG2
+ | bal ->vm_sfi2d_2
+ |. nop
+ | b <4
+ |. nop
+ |.endif
+ |
+ |6: // RA is an integer, RD is not an integer
+ | sltu AT, CARG4, TISNUM
+ |.if FFI
+ | beqz AT, >9
+ |.else
+ | beqz AT, <2
+ |.endif
+ | // RA is an integer, RD is a number. Convert RA to a number.
+ |.if FPU
+ |. lwc1 FTMP0, LO(RA)
+ | ldc1 FTMP2, 0(RD)
+ | b <4
+ | cvt.d.w FTMP0, FTMP0
+ |.else
+ |. sextw CARG1, CARG1
+ | bal ->vm_sfi2d_1
+ |. nop
+ | b <4
+ |. nop
+ |.endif
+ |
+ |.if FFI
+ |8:
+ | li AT, LJ_TCDATA
+ | bne CARG3, AT, <2
+ |. nop
+ | b ->vmeta_equal_cd
+ |. nop
+ |9:
+ | li AT, LJ_TCDATA
+ | bne CARG4, AT, <2
+ |. nop
+ | b ->vmeta_equal_cd
+ |. nop
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | daddu RA, BASE, RA
+ | srl TMP1, RD, 3
+ | ld TMP0, 0(RA)
+ | lhu TMP2, OFS_RD(PC)
+ | not TMP1, TMP1
+ | gettp TMP0, TMP0
+ | daddiu PC, PC, 4
+ |.if FFI
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. xor TMP0, TMP0, TMP1
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ if (vk) {
+ | seleqz TMP2, TMP2, TMP0
+ } else {
+ | selnez TMP2, TMP2, TMP0
+ }
+ |.else
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ |.endif
+ | daddu PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | daddu RD, BASE, RD
+ | lhu TMP2, OFS_RD(PC)
+ | ld TMP0, 0(RD)
+ | daddiu PC, PC, 4
+ | gettp TMP0, TMP0
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISF) {
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ if (op == BC_IST) {
+ | selnez TMP2, TMP2, TMP0;
+ } else {
+ | seleqz TMP2, TMP2, TMP0;
+ }
+ |.else
+ if (op == BC_IST) {
+ | movz TMP2, r0, TMP0
+ } else {
+ | movn TMP2, r0, TMP0
+ }
+ |.endif
+ | daddu PC, PC, TMP2
+ } else {
+ | ld CRET1, 0(RD)
+ if (op == BC_ISTC) {
+ | beqz TMP0, >1
+ } else {
+ | bnez TMP0, >1
+ }
+ |. daddu RA, BASE, RA
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ | sd CRET1, 0(RA)
+ | daddu PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src*8, RD = -type*8
+ | daddu TMP2, BASE, RA
+ | srl TMP1, RD, 3
+ | ld TMP0, 0(TMP2)
+ | ins_next1
+ | gettp TMP0, TMP0
+ | daddu AT, TMP0, TMP1
+ | bnez AT, ->vmeta_istype
+ |. ins_next2
+ break;
+ case BC_ISNUM:
+ | // RA = src*8, RD = -(TISNUM-1)*8
+ | daddu TMP2, BASE, RA
+ | ld TMP0, 0(TMP2)
+ | ins_next1
+ | checknum TMP0, ->vmeta_istype
+ |. ins_next2
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | daddu RD, BASE, RD
+ | daddu RA, BASE, RA
+ | ld CRET1, 0(RD)
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | daddu RD, BASE, RD
+ | daddu RA, BASE, RA
+ | ld TMP0, 0(RD)
+ | li AT, LJ_TTRUE
+ | gettp TMP0, TMP0
+ | sltu TMP0, AT, TMP0
+ | addiu TMP0, TMP0, 1
+ | dsll TMP0, TMP0, 47
+ | not TMP0, TMP0
+ | ins_next1
+ | sd TMP0, 0(RA)
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | daddu RB, BASE, RD
+ | ld CARG1, 0(RB)
+ | daddu RA, BASE, RA
+ | gettp CARG3, CARG1
+ | bne CARG3, TISNUM, >2
+ |. lui TMP1, 0x8000
+ | sextw CARG1, CARG1
+ | beq CARG1, TMP1, ->vmeta_unm // Meta handler deals with -2^31.
+ |. negu CARG1, CARG1
+ | zextw CARG1, CARG1
+ | settp CARG1, TISNUM
+ |1:
+ | ins_next1
+ | sd CARG1, 0(RA)
+ | ins_next2
+ |2:
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->vmeta_unm
+ |. dsll TMP1, TMP1, 32
+ | b <1
+ |. xor CARG1, CARG1, TMP1
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | daddu CARG2, BASE, RD
+ | daddu RA, BASE, RA
+ | ld TMP0, 0(CARG2)
+ | gettp TMP1, TMP0
+ | daddiu AT, TMP1, -LJ_TSTR
+ | bnez AT, >2
+ |. cleartp STR:CARG1, TMP0
+ | lw CRET1, STR:CARG1->len
+ |1:
+ | settp CRET1, TISNUM
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |2:
+ | daddiu AT, TMP1, -LJ_TTAB
+ | bnez AT, ->vmeta_len
+ |. nop
+#if LJ_52
+ | ld TAB:TMP2, TAB:CARG1->metatable
+ | bnez TAB:TMP2, >9
+ |. nop
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |. nop
+#if LJ_52
+ |9:
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_len
+ | bnez TMP0, <3 // 'no __len' flag set: done.
+ |. nop
+ | b ->vmeta_len
+ |. nop
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro fpmod, a, b, c
+ | bal ->vm_floor // floor(b/c)
+ |. div.d FARG1, b, c
+ | mul.d a, FRET1, c
+ | sub.d a, b, a // b - floor(b/c)*c
+ |.endmacro
+
+ |.macro sfpmod
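+ | // Same as fpmod, but using the libgcc soft-float helpers
+ | // __divdf3/__muldf3/__subdf3 and floor().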
+ | daddiu sp, sp, -16
+ |
+ | load_got __divdf3
+ | sd CARG1, 0(sp)
+ | call_extern
+ |. sd CARG2, 8(sp)
+ |
+ | load_got floor
+ | call_extern
+ |. move CARG1, CRET1
+ |
+ | load_got __muldf3
+ | move CARG1, CRET1
+ | call_extern
+ |. ld CARG2, 8(sp)
+ |
+ | load_got __subdf3
+ | ld CARG1, 0(sp)
+ | call_extern
+ |. move CARG2, CRET1
+ |
+ | daddiu sp, sp, 16
+ |.endmacro
+
+ |.macro ins_arithpre, label
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||switch (vk) {
+ ||case 0:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = num_const*8
+ | daddu RB, BASE, RB
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. daddu RC, KBASE, RC
+ || break;
+ ||case 1:
+ | decode_RB8a RC, INS
+ | decode_RB8b RC
+ | decode_RDtoRC8 RB, RD
+ | // RA = dst*8, RB = num_const*8, RC = src1*8
+ | daddu RC, BASE, RC
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. daddu RB, KBASE, RB
+ || break;
+ ||default:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = src2*8
+ | daddu RB, BASE, RB
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. daddu RC, BASE, RC
+ || break;
+ ||}
+ |.endmacro
+ |
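+ | // Integer fast path (with overflow and zero-divisor checks) when both
+ | // operands are integers, FP path via FPU instructions or soft-float calls
+ | // when both are numbers, ->vmeta_arith otherwise.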
+ |.macro ins_arith, intins, fpins, fpcall, label
+ | ins_arithpre none
+ |
+ |.if "label" ~= "none"
+ |label:
+ |.endif
+ |
+ |// Used in 5.
+ | ld CARG1, 0(RB)
+ | ld CARG2, 0(RC)
+ | gettp TMP0, CARG1
+ | gettp TMP1, CARG2
+ |
+ |.if "intins" ~= "div"
+ |
+ | // Check for two integers.
+ | sextw CARG3, CARG1
+ | bne TMP0, TISNUM, >5
+ |. sextw CARG4, CARG2
+ | bne TMP1, TISNUM, >5
+ |
+ |.if "intins" == "addu"
+ |. intins CRET1, CARG3, CARG4
+ | xor TMP1, CRET1, CARG3 // ((y^a) & (y^b)) < 0: overflow.
+ | xor TMP2, CRET1, CARG4
+ | and TMP1, TMP1, TMP2
+ | bltz TMP1, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.elif "intins" == "subu"
+ |. intins CRET1, CARG3, CARG4
+ | xor TMP1, CRET1, CARG3 // ((y^a) & (a^b)) < 0: overflow.
+ | xor TMP2, CARG3, CARG4
+ | and TMP1, TMP1, TMP2
+ | bltz TMP1, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.elif "intins" == "mult"
+ |.if MIPSR6
+ |. nop
+ | mul CRET1, CARG3, CARG4
+ | muh TMP2, CARG3, CARG4
+ |.else
+ |. intins CARG3, CARG4
+ | mflo CRET1
+ | mfhi TMP2
+ |.endif
+ | sra TMP1, CRET1, 31
+ | bne TMP1, TMP2, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.else
+ |. load_got lj_vm_modi
+ | beqz CARG4, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ | move CARG1, CARG3
+ | call_extern
+ |. move CARG2, CARG4
+ |.endif
+ |
+ | zextw CRET1, CRET1
+ | settp CRET1, TISNUM
+ | ins_next1
+ | sd CRET1, 0(RA)
+ |3:
+ | ins_next2
+ |
+ |.endif
+ |
+ |5: // Check for two numbers.
+ | .FPU ldc1 FTMP0, 0(RB)
+ | sltu AT, TMP0, TISNUM
+ | sltu TMP0, TMP1, TISNUM
+ | .FPU ldc1 FTMP2, 0(RC)
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |
+ |.if FPU
+ | fpins FRET1, FTMP0, FTMP2
+ |.elif "fpcall" == "sfpmod"
+ | sfpmod
+ |.else
+ | load_got fpcall
+ | call_extern
+ |. nop
+ |.endif
+ |
+ | ins_next1
+ |.if "intins" ~= "div"
+ | b <3
+ |.endif
+ |.if FPU
+ |. sdc1 FRET1, 0(RA)
+ |.else
+ |. sd CRET1, 0(RA)
+ |.endif
+ |.if "intins" == "div"
+ | ins_next2
+ |.endif
+ |
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith addu, add.d, __adddf3, none
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith subu, sub.d, __subdf3, none
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mult, mul.d, __muldf3, none
+ break;
+ case BC_DIVVN:
+ | ins_arith div, div.d, __divdf3, ->BC_DIVVN_Z
+ break;
+ case BC_DIVNV: case BC_DIVVV:
+ | ins_arithpre ->BC_DIVVN_Z
+ break;
+ case BC_MODVN:
+ | ins_arith modi, fpmod, sfpmod, ->BC_MODVN_Z
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre ->BC_MODVN_Z
+ break;
+ case BC_POW:
+ | ins_arithpre none
+ | ld CARG1, 0(RB)
+ | ld CARG2, 0(RC)
+ | gettp TMP0, CARG1
+ | gettp TMP1, CARG2
+ | sltiu TMP0, TMP0, LJ_TISNUM
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | and AT, TMP0, TMP1
+ | load_got pow
+ | beqz AT, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.if FPU
+ | ldc1 FARG1, 0(RB)
+ | ldc1 FARG2, 0(RC)
+ |.endif
+ | call_extern
+ |. nop
+ | ins_next1
+ |.if FPU
+ | sdc1 FRET1, 0(RA)
+ |.else
+ | sd CRET1, 0(RA)
+ |.endif
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | dsubu CARG3, RC, RB
+ | sd BASE, L->base
+ | daddu CARG2, BASE, RC
+ | move MULTRES, RB
+ |->BC_CAT_Z:
+ | load_got lj_meta_cat
+ | srl CARG3, CARG3, 3
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | bnez CRET1, ->vmeta_binop
+ |. ld BASE, L->base
+ | daddu RB, BASE, MULTRES
+ | ld CRET1, 0(RB)
+ | daddu RA, BASE, RA
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | dsubu TMP1, KBASE, RD
+ | ins_next1
+ | li TMP2, LJ_TSTR
+ | ld TMP0, -8(TMP1) // KBASE-8-str_const*8
+ | daddu RA, BASE, RA
+ | settp TMP0, TMP2
+ | sd TMP0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | dsubu TMP1, KBASE, RD
+ | ins_next1
+ | ld TMP0, -8(TMP1) // KBASE-8-cdata_const*8
+ | li TMP2, LJ_TCDATA
+ | daddu RA, BASE, RA
+ | settp TMP0, TMP2
+ | sd TMP0, 0(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | sra RD, INS, 16
+ | daddu RA, BASE, RA
+ | zextw RD, RD
+ | ins_next1
+ | settp RD, TISNUM
+ | sd RD, 0(RA)
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | daddu RD, KBASE, RD
+ | daddu RA, BASE, RA
+ | ld CRET1, 0(RD)
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | daddu RA, BASE, RA
+ | dsll TMP0, RD, 44
+ | not TMP0, TMP0
+ | ins_next1
+ | sd TMP0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | daddu RA, BASE, RA
+ | sd TISNIL, 0(RA)
+ | daddiu RA, RA, 8
+ | daddu RD, BASE, RD
+ |1:
+ | sd TISNIL, 0(RA)
+ | slt AT, RA, RD
+ | bnez AT, <1
+ |. daddiu RA, RA, 8
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | daddu RA, BASE, RA
+ | cleartp LFUNC:RB
+ | daddu RD, RD, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RD->uvptr
+ | ins_next1
+ | ld TMP1, UPVAL:RB->v
+ | ld CRET1, 0(TMP1)
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | daddu RD, BASE, RD
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ld CRET1, 0(RD)
+ | lbu TMP3, UPVAL:RB->marked
+ | ld CARG2, UPVAL:RB->v
+ | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP0, UPVAL:RB->closed
+ | gettp TMP2, CRET1
+ | sd CRET1, 0(CARG2)
+ | li AT, LJ_GC_BLACK|1
+ | or TMP3, TMP3, TMP0
+ | beq TMP3, AT, >2 // Upvalue is closed and black?
+ |. daddiu TMP2, TMP2, -(LJ_TNUMX+1)
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1)
+ | beqz AT, <1 // tvisgcv(v)
+ |. cleartp GCOBJ:CRET1, CRET1
+ | lbu TMP3, GCOBJ:CRET1->gch.marked
+ | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | beqz TMP3, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. daddiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | dsubu TMP1, KBASE, RD
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ld STR:TMP1, -8(TMP1) // KBASE-8-str_const*8
+ | lbu TMP2, UPVAL:RB->marked
+ | ld CARG2, UPVAL:RB->v
+ | lbu TMP3, STR:TMP1->marked
+ | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | settp TMP1, TMP0
+ | bnez AT, >2
+ |. sd TMP1, 0(CARG2)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | beqz TMP2, <1
+ |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
+ | beqz AT, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. daddiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | daddu RD, KBASE, RD
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ld CRET1, 0(RD)
+ | ld TMP1, UPVAL:RB->v
+ | ins_next1
+ | sd CRET1, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | dsll TMP0, RD, 44
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | not TMP0, TMP0
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | ld TMP1, UPVAL:RB->v
+ | sd TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | ld TMP2, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | load_got lj_func_closeuv
+ | sd BASE, L->base
+ | beqz TMP2, >1
+ |. move CARG1, L
+ | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
+ |. daddu CARG2, BASE, RA
+ | ld BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | load_got lj_func_newL_gc
+ | dsubu TMP1, KBASE, RD
+ | ld CARG3, FRAME_FUNC(BASE)
+ | ld CARG2, -8(TMP1) // KBASE-8-proto_const*8
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | cleartp CARG3
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call_intern lj_func_newL_gc
+ |. move CARG1, L
+ | // Returns GCfuncL *.
+ | li TMP0, LJ_TFUNC
+ | ld BASE, L->base
+ | ins_next1
+ | settp CRET1, TMP0
+ | daddu RA, BASE, RA
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | ld TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | ld TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | sltu AT, TMP0, TMP1
+ | beqz AT, >5
+ |1:
+ if (op == BC_TNEW) {
+ | load_got lj_tab_new
+ | srl CARG2, RD, 3
+ | andi CARG2, CARG2, 0x7ff
+ | li TMP0, 0x801
+ | addiu AT, CARG2, -0x7ff
+ | srl CARG3, RD, 14
+ |.if MIPSR6
+ | seleqz TMP0, TMP0, AT
+ | selnez CARG2, CARG2, AT
+ | or CARG2, CARG2, TMP0
+ |.else
+ | movz CARG2, TMP0, AT
+ |.endif
+ | // (lua_State *L, int32_t asize, uint32_t hbits)
+ | call_intern lj_tab_new
+ |. move CARG1, L
+ | // Returns Table *.
+ } else {
+ | load_got lj_tab_dup
+ | dsubu TMP1, KBASE, RD
+ | move CARG1, L
+ | call_intern lj_tab_dup // (lua_State *L, Table *kt)
+ |. ld CARG2, -8(TMP1) // KBASE-8-tab_const*8
+ | // Returns Table *.
+ }
+ | li TMP0, LJ_TTAB
+ | ld BASE, L->base
+ | ins_next1
+ | daddu RA, BASE, RA
+ | settp CRET1, TMP0
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |5:
+ | load_got lj_gc_step_fixtop
+ | move MULTRES, RD
+ | call_intern lj_gc_step_fixtop // (lua_State *L)
+ |. move CARG1, L
+ | b <1
+ |. move RD, MULTRES
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | ld LFUNC:TMP2, FRAME_FUNC(BASE)
+ | dsubu TMP1, KBASE, RD
+ | ld STR:RC, -8(TMP1) // KBASE-8-str_const*8
+ | cleartp LFUNC:TMP2
+ | ld TAB:RB, LFUNC:TMP2->env
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ |. daddu RA, BASE, RA
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu CARG2, BASE, RB
+ | daddu CARG3, BASE, RC
+ | ld TAB:RB, 0(CARG2)
+ | ld TMP2, 0(CARG3)
+ | daddu RA, BASE, RA
+ | checktab TAB:RB, ->vmeta_tgetv
+ | gettp TMP3, TMP2
+ | bne TMP3, TISNUM, >5 // Integer key?
+ |. lw TMP0, TAB:RB->asize
+ | sextw TMP2, TMP2
+ | ld TMP1, TAB:RB->array
+ | sltu AT, TMP2, TMP0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tgetv // Integer key and in array part?
+ |. daddu TMP2, TMP1, TMP2
+ | ld AT, 0(TMP2)
+ | beq AT, TISNIL, >2
+ |. ld CRET1, 0(TMP2)
+ |1:
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetv
+ |. nop
+ |
+ |5:
+ | li AT, LJ_TSTR
+ | bne TMP3, AT, ->vmeta_tgetv
+ |. cleartp RC, TMP2
+ | b ->BC_TGETS_Z // String key?
+ |. nop
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RC8a RC, INS
+ | daddu CARG2, BASE, RB
+ | decode_RC8b RC
+ | ld TAB:RB, 0(CARG2)
+ | dsubu CARG3, KBASE, RC
+ | daddu RA, BASE, RA
+ | ld STR:RC, -8(CARG3) // KBASE-8-str_const*8
+ | checktab TAB:RB, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | ld NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | li TMP3, LJ_TSTR
+ | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | settp STR:RC, TMP3 // Tagged key to look for.
+ |1:
+ | ld CARG1, NODE:TMP2->key
+ | ld CRET1, NODE:TMP2->val
+ | ld NODE:TMP1, NODE:TMP2->next
+ | bne CARG1, RC, >4
+ |. ld TAB:TMP3, TAB:RB->metatable
+ | beq CRET1, TISNIL, >5 // Key found, but nil value?
+ |. nop
+ |3:
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |
+ |4: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | beqz TAB:TMP3, <3 // No metatable: done.
+ |. move CRET1, TISNIL
+ | lbu TMP0, TAB:TMP3->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <3 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgets
+ |. nop
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | daddu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | ld TAB:RB, 0(CARG2)
+ | daddu RA, BASE, RA
+ | srl TMP0, RC, 3
+ | checktab TAB:RB, ->vmeta_tgetb
+ | lw TMP1, TAB:RB->asize
+ | ld TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tgetb
+ |. daddu RC, TMP2, RC
+ | ld AT, 0(RC)
+ | beq AT, TISNIL, >5
+ |. ld CRET1, 0(RC)
+ |1:
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_index
+ | bnez TMP1, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetb // Caveat: preserve TMP0 and CARG2!
+ |. nop
+ break;
+ case BC_TGETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu RB, BASE, RB
+ | daddu RC, BASE, RC
+ | ld TAB:CARG1, 0(RB)
+ | lw CARG2, LO(RC)
+ | daddu RA, BASE, RA
+ | cleartp TAB:CARG1
+ | lw TMP0, TAB:CARG1->asize
+ | ld TMP1, TAB:CARG1->array
+ | sltu AT, CARG2, TMP0
+ | sll TMP2, CARG2, 3
+ | beqz AT, ->vmeta_tgetr // In array part?
+ |. daddu CRET1, TMP1, TMP2
+ | ld CARG2, 0(CRET1)
+ |->BC_TGETR_Z:
+ | ins_next1
+ | sd CARG2, 0(RA)
+ | ins_next2
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu CARG2, BASE, RB
+ | daddu CARG3, BASE, RC
+ | ld RB, 0(CARG2)
+ | ld TMP2, 0(CARG3)
+ | daddu RA, BASE, RA
+ | checktab RB, ->vmeta_tsetv
+ | checkint TMP2, >5
+ |. sextw RC, TMP2
+ | lw TMP0, TAB:RB->asize
+ | ld TMP1, TAB:RB->array
+ | sltu AT, RC, TMP0
+ | sll TMP2, RC, 3
+ | beqz AT, ->vmeta_tsetv // Integer key and in array part?
+ |. daddu TMP1, TMP1, TMP2
+ | ld TMP0, 0(TMP1)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP0, TISNIL, >3
+ |. ld CRET1, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sd CRET1, 0(TMP1)
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP2, TAB:TMP2->nomm
+ | andi TMP2, TMP2, 1<<MM_newindex
+ | bnez TMP2, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetv
+ |. nop
+ |
+ |5:
+ | gettp AT, TMP2
+ | daddiu AT, AT, -LJ_TSTR
+ | bnez AT, ->vmeta_tsetv
+ |. nop
+ | b ->BC_TSETS_Z // String key?
+ |. cleartp STR:RC, TMP2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | daddu CARG2, BASE, RB
+ | decode_RC8a RC, INS
+ | ld TAB:RB, 0(CARG2)
+ | decode_RC8b RC
+ | dsubu CARG3, KBASE, RC
+ | ld RC, -8(CARG3) // KBASE-8-str_const*8
+ | daddu RA, BASE, RA
+ | cleartp STR:RC
+ | checktab TAB:RB, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | ld NODE:TMP2, TAB:RB->node
+ | sb r0, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | li TMP3, LJ_TSTR
+ | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | settp STR:RC, TMP3 // Tagged key to look for.
+ |.if FPU
+ | ldc1 FTMP0, 0(RA)
+ |.else
+ | ld CRET1, 0(RA)
+ |.endif
+ |1:
+ | ld TMP0, NODE:TMP2->key
+ | ld CARG2, NODE:TMP2->val
+ | ld NODE:TMP1, NODE:TMP2->next
+ | bne TMP0, RC, >5
+ |. lbu TMP3, TAB:RB->marked
+ | beq CARG2, TISNIL, >4 // Key found, but nil value?
+ |. ld TAB:TMP0, TAB:RB->metatable
+ |2:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |.if FPU
+ |. sdc1 FTMP0, NODE:TMP2->val
+ |.else
+ |. sd CRET1, NODE:TMP2->val
+ |.endif
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | beqz TAB:TMP0, <2 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP0->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | bnez TMP0, <2 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsets
+ |. nop
+ |
+ |5: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, add a new one
+ |
+ | // But check for __newindex first.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, >6 // No metatable: continue.
+ |. daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | load_got lj_tab_newkey
+ | sd RC, 0(CARG3)
+ | sd BASE, L->base
+ | move CARG2, TAB:RB
+ | sd PC, SAVE_PC
+ | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | ld BASE, L->base
+ |.if FPU
+ | b <3 // No 2nd write barrier needed.
+ |. sdc1 FTMP0, 0(CRET1)
+ |.else
+ | ld CARG1, 0(RA)
+ | b <3 // No 2nd write barrier needed.
+ |. sd CARG1, 0(CRET1)
+ |.endif
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | daddu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | ld TAB:RB, 0(CARG2)
+ | daddu RA, BASE, RA
+ | srl TMP0, RC, 3
+ | checktab RB, ->vmeta_tsetb
+ | lw TMP1, TAB:RB->asize
+ | ld TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tsetb
+ |. daddu RC, TMP2, RC
+ | ld TMP1, 0(RC)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP1, TISNIL, >5
+ |1:
+ |. ld CRET1, 0(RA)
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sd CRET1, 0(RC)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_newindex
+ | bnez TMP1, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetb // Caveat: preserve TMP0 and CARG2!
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu CARG1, BASE, RB
+ | daddu CARG3, BASE, RC
+ | ld TAB:CARG2, 0(CARG1)
+ | lw CARG3, LO(CARG3)
+ | cleartp TAB:CARG2
+ | lbu TMP3, TAB:CARG2->marked
+ | lw TMP0, TAB:CARG2->asize
+ | ld TMP1, TAB:CARG2->array
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. daddu RA, BASE, RA
+ |2:
+ | sltu AT, CARG3, TMP0
+ | sll TMP2, CARG3, 3
+ | beqz AT, ->vmeta_tsetr // In array part?
+ |. daddu CRET1, TMP1, TMP2
+ |->BC_TSETR_Z:
+ | ld CARG1, 0(RA)
+ | ins_next1
+ | sd CARG1, 0(CRET1)
+ | ins_next2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, CRET1, <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | daddu RA, BASE, RA
+ |1:
+ | daddu TMP3, KBASE, RD
+ | ld TAB:CARG2, -8(RA) // Guaranteed to be a table.
+ | addiu TMP0, MULTRES, -8
+ | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
+ | beqz TMP0, >4 // Nothing to copy?
+ |. srl CARG3, TMP0, 3
+ | cleartp CARG2
+ | addu CARG3, CARG3, TMP3
+ | lw TMP2, TAB:CARG2->asize
+ | sll TMP1, TMP3, 3
+ | lbu TMP3, TAB:CARG2->marked
+ | ld CARG1, TAB:CARG2->array
+ | sltu AT, TMP2, CARG3
+ | bnez AT, >5
+ |. daddu TMP2, RA, TMP0
+ | daddu TMP1, TMP1, CARG1
+ | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | sltu AT, RA, TMP2
+ | sd CRET1, 0(TMP1)
+ | bnez AT, <3
+ |. daddiu TMP1, TMP1, 8
+ | bnez TMP0, >7
+ |. nop
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | load_got lj_tab_reasize
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | move BASE, RD
+ | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ |. move CARG1, L
+ | // Must not reallocate the stack.
+ | move RD, BASE
+ | b <1
+ |. ld BASE, L->base // Reload BASE for lack of a saved register.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0, <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ | b ->BC_CALL_Z
+ |. addu NARGS8:RC, NARGS8:RC, MULTRES
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ |->BC_CALL_Z:
+ | move TMP2, BASE
+ | daddu BASE, BASE, RA
+ | ld LFUNC:RB, 0(BASE)
+ | daddiu BASE, BASE, 16
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | checkfunc RB, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | daddu RA, BASE, RA
+ | ld RB, 0(RA)
+ | move NARGS8:RC, RD
+ | ld TMP1, FRAME_PC(BASE)
+ | daddiu RA, RA, 16
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | checktp CARG3, RB, -LJ_TFUNC, ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
+ | lbu TMP3, LFUNC:CARG3->ffid
+ | bnez TMP0, >7
+ |. xori TMP2, TMP1, FRAME_VARG
+ |1:
+ | sd RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function?
+ | move TMP2, BASE
+ | move RB, CARG3
+ | beqz NARGS8:RC, >3
+ |. move TMP3, NARGS8:RC
+ |2:
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | addiu TMP3, TMP3, -8
+ | sd CRET1, 0(TMP2)
+ | bnez TMP3, <2
+ |. daddiu TMP2, TMP2, 8
+ |3:
+ | or TMP0, TMP0, AT
+ | beqz TMP0, >5
+ |. nop
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lw INS, -4(TMP1)
+ | decode_RA8a RA, INS
+ | decode_RA8b RA
+ | dsubu TMP1, BASE, RA
+ | ld TMP1, -32(TMP1)
+ | cleartp LFUNC:TMP1
+ | ld TMP1, LFUNC:TMP1->pc
+ | b <4
+ |. ld KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ |
+ |7: // Tailcall from a vararg function.
+ | andi AT, TMP2, FRAME_TYPEP
+ | bnez AT, <1 // Vararg frame below?
+ |. dsubu TMP2, BASE, TMP2 // Relocate BASE down.
+ | move BASE, TMP2
+ | ld TMP1, FRAME_PC(TMP2)
+ | b <1
+ |. andi TMP0, TMP1, FRAME_TYPE
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | move TMP2, BASE // Save old BASE for vmeta_call.
+ | daddu BASE, BASE, RA
+ | ld RB, -24(BASE)
+ | ld CARG1, -16(BASE)
+ | ld CARG2, -8(BASE)
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | sd RB, 0(BASE) // Copy callable.
+ | sd CARG1, 16(BASE) // Copy state.
+ | sd CARG2, 24(BASE) // Copy control var.
+ | daddiu BASE, BASE, 16
+ | checkfunc RB, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT and ENDIAN_LE
+ | hotloop
+ |.endif
+ |->vm_IITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+ | daddu RA, BASE, RA
+ | ld TAB:RB, -16(RA)
+ | lw RC, -8+LO(RA) // Get index from control var.
+ | cleartp TAB:RB
+ | daddiu PC, PC, 4
+ | lw TMP0, TAB:RB->asize
+ | ld TMP1, TAB:RB->array
+ | dsll CARG3, TISNUM, 47
+ |1: // Traverse array part.
+ | sltu AT, RC, TMP0
+ | beqz AT, >5 // Index points after array part?
+ |. sll TMP3, RC, 3
+ | daddu TMP3, TMP1, TMP3
+ | ld CARG1, 0(TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | or TMP2, RC, CARG3
+ | beq CARG1, TISNIL, <1 // Skip holes in array part.
+ |. addiu RC, RC, 1
+ | sd TMP2, 0(RA)
+ | sd CARG1, 8(RA)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | decode_RD4b RD
+ | daddu RD, RD, TMP3
+ | sw RC, -8+LO(RA) // Update control var.
+ | daddu PC, PC, RD
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | lw TMP1, TAB:RB->hmask
+ | subu RC, RC, TMP0
+ | ld TMP2, TAB:RB->node
+ |6:
+ | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
+ | bnez AT, <3
+ |. sll TMP3, RC, 5
+ | sll RB, RC, 3
+ | subu TMP3, TMP3, RB
+ | daddu NODE:TMP3, TMP3, TMP2
+ | ld CARG1, 0(NODE:TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | beq CARG1, TISNIL, <6 // Skip holes in hash part.
+ |. addiu RC, RC, 1
+ | ld CARG2, NODE:TMP3->key
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sd CARG1, 8(RA)
+ | addu RC, RC, TMP0
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sd CARG2, 0(RA)
+ | daddu PC, PC, RD
+ | b <3
+ |. sw RC, -8+LO(RA) // Update control var.
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | daddu RA, BASE, RA
+ | srl TMP0, RD, 1
+ | ld CFUNC:CARG1, -24(RA)
+ | daddu TMP0, PC, TMP0
+ | ld CARG2, -16(RA)
+ | ld CARG3, -8(RA)
+ | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | checkfunc CFUNC:CARG1, >5
+ | gettp CARG2, CARG2
+ | daddiu CARG2, CARG2, -LJ_TTAB
+ | lbu TMP1, CFUNC:CARG1->ffid
+ | daddiu CARG3, CARG3, -LJ_TNIL
+ | or AT, CARG2, CARG3
+ | daddiu TMP1, TMP1, -FF_next_N
+ | or AT, AT, TMP1
+ | bnez AT, >5
+ |. lui TMP1, (LJ_KEYINDEX >> 16)
+ | daddu PC, TMP0, TMP2
+ | ori TMP1, TMP1, (LJ_KEYINDEX & 0xffff)
+ | dsll TMP1, TMP1, 32
+ | sd TMP1, -8(RA)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP3, BC_JMP
+ | li TMP1, BC_ITERC
+ | sb TMP3, -4+OFS_OP(PC)
+ | daddu PC, TMP0, TMP2
+ |.if JIT
+ | lb TMP0, OFS_OP(PC)
+ | li AT, BC_ITERN
+ | bne TMP0, AT, >6
+ |. lhu TMP2, OFS_RD(PC)
+ |.endif
+ | b <1
+ |. sb TMP1, OFS_OP(PC)
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | ld TMP0, DISPATCH_J(trace)(DISPATCH)
+ | sll TMP2, TMP2, 3
+ | daddu TMP0, TMP0, TMP2
+ | ld TRACE:TMP2, 0(TMP0)
+ | lw TMP0, TRACE:TMP2->startins
+ | li AT, -256
+ | and TMP0, TMP0, AT
+ | or TMP0, TMP0, TMP1
+ | b <1
+ |. sw TMP0, 0(PC)
+ |.endif
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | ld TMP0, FRAME_PC(BASE)
+ | decode_RDtoRC8 RC, RD
+ | decode_RB8a RB, INS
+ | daddu RC, BASE, RC
+ | decode_RB8b RB
+ | daddu RA, BASE, RA
+ | daddiu RC, RC, FRAME_VARG
+ | daddu TMP2, RA, RB
+ | daddiu TMP3, BASE, -16 // TMP3 = vtop
+ | dsubu RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | beqz RB, >5 // Copy all varargs?
+ |. dsubu TMP1, TMP3, RC
+ | daddiu TMP2, TMP2, -16
+ |1: // Copy vararg slots to destination slots.
+ | ld CARG1, 0(RC)
+ | sltu AT, RC, TMP3
+ | daddiu RC, RC, 8
+ |.if MIPSR6
+ | selnez CARG1, CARG1, AT
+ | seleqz AT, TISNIL, AT
+ | or CARG1, CARG1, AT
+ |.else
+ | movz CARG1, TISNIL, AT
+ |.endif
+ | sd CARG1, 0(RA)
+ | sltu AT, RA, TMP2
+ | bnez AT, <1
+ |. daddiu RA, RA, 8
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ld TMP0, L->maxstack
+ | blez TMP1, <3 // No vararg slots?
+ |. li MULTRES, 8 // MULTRES = (0+1)*8
+ | daddu TMP2, RA, TMP1
+ | sltu AT, TMP0, TMP2
+ | bnez AT, >7
+ |. daddiu MULTRES, TMP1, 8
+ |6:
+ | ld CRET1, 0(RC)
+ | daddiu RC, RC, 8
+ | sd CRET1, 0(RA)
+ | sltu AT, RC, TMP3
+ | bnez AT, <6 // More vararg slots?
+ |. daddiu RA, RA, 8
+ | b <3
+ |. nop
+ |
+ |7: // Grow stack for varargs.
+ | load_got lj_state_growstack
+ | sd RA, L->top
+ | dsubu RA, RA, BASE
+ | sd BASE, L->base
+ | dsubu BASE, RC, BASE // Need delta, because BASE may change.
+ | sd PC, SAVE_PC
+ | srl CARG2, TMP1, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | move RC, BASE
+ | ld BASE, L->base
+ | daddu RA, BASE, RA
+ | daddu RC, BASE, RC
+ | b <6
+ |. daddiu TMP3, BASE, -16
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | ld PC, FRAME_PC(BASE)
+ | daddu RA, BASE, RA
+ | move MULTRES, RD
+ |1:
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lw INS, -4(PC)
+ | daddiu TMP2, BASE, -16
+ | daddiu RC, RD, -8
+ | decode_RA8a TMP0, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b TMP0
+ | decode_RB8b RB
+ | daddu TMP3, TMP2, RB
+ | beqz RC, >3
+ |. dsubu BASE, TMP2, TMP0
+ |2:
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | daddiu RC, RC, -8
+ | sd CRET1, 0(TMP2)
+ | bnez RC, <2
+ |. daddiu TMP2, TMP2, 8
+ |3:
+ | daddiu TMP3, TMP3, -8
+ |5:
+ | sltu AT, TMP2, TMP3
+ | bnez AT, >6
+ |. ld LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | cleartp LFUNC:TMP1
+ | ld TMP1, LFUNC:TMP1->pc
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | sd TISNIL, 0(TMP2)
+ | b <5
+ |. daddiu TMP2, TMP2, 8
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi TMP2, TMP1, FRAME_TYPEP
+ | bnez TMP2, ->vm_return
+ |. nop
+ | // Return from vararg function: relocate BASE down.
+ | dsubu BASE, BASE, TMP1
+ | b <1
+ |. ld PC, FRAME_PC(BASE)
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | ld PC, FRAME_PC(BASE)
+ | daddu RA, BASE, RA
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ | lw INS, -4(PC)
+ | daddiu TMP2, BASE, -16
+ if (op == BC_RET1) {
+ | ld CRET1, 0(RA)
+ }
+ | decode_RB8a RB, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b RB
+ | decode_RA8b RA
+ | dsubu BASE, TMP2, RA
+ if (op == BC_RET1) {
+ | sd CRET1, 0(TMP2)
+ }
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6
+ |. ld TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | cleartp LFUNC:TMP1
+ | ld TMP1, LFUNC:TMP1->pc
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | daddiu TMP2, TMP2, 8
+ | daddiu RD, RD, 8
+ | b <5
+ if (op == BC_RET1) {
+ |. sd TISNIL, 0(TMP2)
+ } else {
+ |. sd TISNIL, -8(TMP2)
+ }
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | daddu RA, BASE, RA
+ | ld CARG1, FORL_IDX*8(RA) // IDX CARG1 - CARG3 type
+ | gettp CARG3, CARG1
+ if (op != BC_JFORL) {
+ | srl RD, RD, 1
+ | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | daddu TMP2, RD, TMP2
+ }
+ if (!vk) {
+ | ld CARG2, FORL_STOP*8(RA) // STOP CARG2 - CARG4 type
+ | ld CRET1, FORL_STEP*8(RA) // STEP CRET1 - CRET2 type
+ | gettp CARG4, CARG2
+ | bne CARG3, TISNUM, >5
+ |. gettp CRET2, CRET1
+ | bne CARG4, TISNUM, ->vmeta_for
+ |. sextw CARG3, CARG1
+ | bne CRET2, TISNUM, ->vmeta_for
+ |. sextw CARG2, CARG2
+ | dext AT, CRET1, 31, 0
+ | slt CRET1, CARG2, CARG3
+ | slt TMP1, CARG3, CARG2
+ |.if MIPSR6
+ | selnez TMP1, TMP1, AT
+ | seleqz CRET1, CRET1, AT
+ | or CRET1, CRET1, TMP1
+ |.else
+ | movn CRET1, TMP1, AT
+ |.endif
+ } else {
+ | bne CARG3, TISNUM, >5
+ |. ld CARG2, FORL_STEP*8(RA) // STEP CARG2 - CARG4 type
+ | ld CRET1, FORL_STOP*8(RA) // STOP CRET1 - CRET2 type
+ | sextw TMP3, CARG1
+ | sextw CARG2, CARG2
+ | sextw CRET1, CRET1
+ | addu CARG1, TMP3, CARG2
+ | xor TMP0, CARG1, TMP3
+ | xor TMP1, CARG1, CARG2
+ | and TMP0, TMP0, TMP1
+ | slt TMP1, CARG1, CRET1
+ | slt CRET1, CRET1, CARG1
+ | slt AT, CARG2, r0
+ | slt TMP0, TMP0, r0 // ((y^a) & (y^b)) < 0: overflow.
+ |.if MIPSR6
+ | selnez TMP1, TMP1, AT
+ | seleqz CRET1, CRET1, AT
+ | or CRET1, CRET1, TMP1
+ |.else
+ | movn CRET1, TMP1, AT
+ |.endif
+ | or CRET1, CRET1, TMP0
+ | zextw CARG1, CARG1
+ | settp CARG1, TISNUM
+ }
+ |1:
+ if (op == BC_FORI) {
+ |.if MIPSR6
+ | selnez TMP2, TMP2, CRET1
+ |.else
+ | movz TMP2, r0, CRET1
+ |.endif
+ | daddu PC, PC, TMP2
+ } else if (op == BC_JFORI) {
+ | daddu PC, PC, TMP2
+ | lhu RD, -4+OFS_RD(PC)
+ } else if (op == BC_IFORL) {
+ |.if MIPSR6
+ | seleqz TMP2, TMP2, CRET1
+ |.else
+ | movn TMP2, r0, CRET1
+ |.endif
+ | daddu PC, PC, TMP2
+ }
+ if (vk) {
+ | sd CARG1, FORL_IDX*8(RA)
+ }
+ | ins_next1
+ | sd CARG1, FORL_EXT*8(RA)
+ |2:
+ if (op == BC_JFORI) {
+ | beqz CRET1, =>BC_JLOOP
+ |. decode_RD8b RD
+ } else if (op == BC_JFORL) {
+ | beqz CRET1, =>BC_JLOOP
+ }
+ | ins_next2
+ |
+ |5: // FP loop.
+ |.if FPU
+ if (!vk) {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, CARG4, LJ_TISNUM
+ | sltiu AT, CRET2, LJ_TISNUM
+ | ld TMP3, FORL_STEP*8(RA)
+ | and TMP0, TMP0, TMP1
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_for
+ |. slt TMP3, TMP3, r0
+ |.if MIPSR6
+ | dmtc1 TMP3, FTMP2
+ | cmp.lt.d FTMP0, f0, f2
+ | cmp.lt.d FTMP1, f2, f0
+ | sel.d FTMP2, FTMP1, FTMP0
+ | b <1
+ |. dmfc1 CRET1, FTMP2
+ |.else
+ | c.ole.d 0, f0, f2
+ | c.ole.d 1, f2, f0
+ | li CRET1, 1
+ | movt CRET1, r0, 0
+ | movt AT, r0, 1
+ | b <1
+ |. movn CRET1, AT, TMP3
+ |.endif
+ } else {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f4, FORL_STEP*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | ld TMP3, FORL_STEP*8(RA)
+ | add.d f0, f0, f4
+ |.if MIPSR6
+ | slt TMP3, TMP3, r0
+ | dmtc1 TMP3, FTMP2
+ | cmp.lt.d FTMP0, f0, f2
+ | cmp.lt.d FTMP1, f2, f0
+ | sel.d FTMP2, FTMP1, FTMP0
+ | dmfc1 CRET1, FTMP2
+ if (op == BC_IFORL) {
+ | seleqz TMP2, TMP2, CRET1
+ | daddu PC, PC, TMP2
+ }
+ |.else
+ | c.ole.d 0, f0, f2
+ | c.ole.d 1, f2, f0
+ | slt TMP3, TMP3, r0
+ | li CRET1, 1
+ | li AT, 1
+ | movt CRET1, r0, 0
+ | movt AT, r0, 1
+ | movn CRET1, AT, TMP3
+ if (op == BC_IFORL) {
+ | movn TMP2, r0, CRET1
+ | daddu PC, PC, TMP2
+ }
+ |.endif
+ | sdc1 f0, FORL_IDX*8(RA)
+ | ins_next1
+ | b <2
+ |. sdc1 f0, FORL_EXT*8(RA)
+ }
+ |.else
+ if (!vk) {
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, CARG4, LJ_TISNUM
+ | sltiu AT, CRET2, LJ_TISNUM
+ | and TMP0, TMP0, TMP1
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_for
+ |. nop
+ | bal ->vm_sfcmpolex
+ |. lw TMP3, FORL_STEP*8+HI(RA)
+ | b <1
+ |. nop
+ } else {
+ | load_got __adddf3
+ | call_extern
+ |. sw TMP2, TMPD
+ | ld CARG2, FORL_STOP*8(RA)
+ | move CARG1, CRET1
+ if ( op == BC_JFORL ) {
+ | lhu RD, -4+OFS_RD(PC)
+ | decode_RD8b RD
+ }
+ | bal ->vm_sfcmpolex
+ |. lw TMP3, FORL_STEP*8+HI(RA)
+ | b <1
+ |. lw TMP2, TMPD
+ }
+ |.endif
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | daddu RA, BASE, RA
+ | ld TMP1, 0(RA)
+ | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
+ |. nop
+ if (op == BC_JITERL) {
+ | b =>BC_JLOOP
+ |. sd TMP1, -8(RA)
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | sd TMP1, -8(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | ld TMP1, DISPATCH_J(trace)(DISPATCH)
+ | li AT, 0
+ | daddu TMP1, TMP1, RD
+ | // Traces on MIPS don't store the trace number, so use 0.
+ | sd AT, DISPATCH_GL(vmstate)(DISPATCH)
+ | ld TRACE:TMP2, 0(TMP1)
+ | sd BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | ld TMP2, TRACE:TMP2->mcode
+ | sd L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
+ | jr TMP2
+ |. daddiu JGL, DISPATCH, GG_DISP2G+32768
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | ld TMP2, L->maxstack
+ | lbu TMP1, -4+PC2PROTO(numparams)(PC)
+ | ld KBASE, -4+PC2PROTO(k)(PC)
+ | sltu AT, TMP2, RA
+ | bnez AT, ->vm_growstack_l
+ |. sll TMP1, TMP1, 3
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
+ | bnez AT, >3
+ |. daddu AT, BASE, NARGS8:RC
+ if (op == BC_JFUNCF) {
+ | decode_RD8a RD, INS
+ | b =>BC_JLOOP
+ |. decode_RD8b RD
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | sd TISNIL, 0(AT)
+ | b <2
+ |. addiu NARGS8:RC, NARGS8:RC, 8
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | li TMP0, LJ_TFUNC
+ | daddu TMP1, BASE, RC
+ | ld TMP2, L->maxstack
+ | settp LFUNC:RB, TMP0
+ | daddu TMP0, RA, RC
+ | sd LFUNC:RB, 0(TMP1) // Store (tagged) copy of LFUNC.
+ | daddiu TMP2, TMP2, -8
+ | daddiu TMP3, RC, 16+FRAME_VARG
+ | sltu AT, TMP0, TMP2
+ | ld KBASE, -4+PC2PROTO(k)(PC)
+ | beqz AT, ->vm_growstack_l
+ |. sd TMP3, 8(TMP1) // Store delta + FRAME_VARG.
+ | lbu TMP2, -4+PC2PROTO(numparams)(PC)
+ | move RA, BASE
+ | move RC, TMP1
+ | ins_next1
+ | beqz TMP2, >3
+ |. daddiu BASE, TMP1, 16
+ |1:
+ | ld TMP0, 0(RA)
+ | sltu AT, RA, RC // Less args than parameters?
+ | move CARG1, TMP0
+ |.if MIPSR6
+ | selnez TMP0, TMP0, AT
+ | seleqz TMP3, TISNIL, AT
+ | or TMP0, TMP0, TMP3
+ | seleqz TMP3, CARG1, AT
+ | selnez CARG1, TISNIL, AT
+ | or CARG1, CARG1, TMP3
+ |.else
+ | movz TMP0, TISNIL, AT // Clear missing parameters.
+ | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
+ |.endif
+ | addiu TMP2, TMP2, -1
+ | sd TMP0, 16(TMP1)
+ | daddiu TMP1, TMP1, 8
+ | sd CARG1, 0(RA)
+ | bnez TMP2, <1
+ |. daddiu RA, RA, 8
+ |3:
+ | ins_next2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ld CFUNCADDR, CFUNC:RB->f
+ } else {
+ | ld CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | daddu TMP1, RA, NARGS8:RC
+ | ld TMP2, L->maxstack
+ | daddu RC, BASE, NARGS8:RC
+ | sd BASE, L->base
+ | sltu AT, TMP2, TMP1
+ | sd RC, L->top
+ | li_vmstate C
+ if (op == BC_FUNCCW) {
+ | ld CARG2, CFUNC:RB->f
+ }
+ | bnez AT, ->vm_growstack_c // Need to grow stack.
+ |. move CARG1, L
+ | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
+ |. st_vmstate
+ | // Returns nresults.
+ | ld BASE, L->base
+ | sll RD, CRET1, 3
+ | ld TMP1, L->top
+ | li_vmstate INTERP
+ | ld PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | dsubu RA, TMP1, RD // RA = L->top - nresults*8
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | b ->vm_returnc
+ |. st_vmstate
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
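+/* The .debug_frame CFI emitted below describes the interpreter's CFRAME */
+/* stack layout so debuggers and external unwinders can walk its frames. */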
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.4byte .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.4byte 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.4byte .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.8byte .Lbegin\n"
+ "\t.8byte %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 2*5\n"
+ "\t.byte 0x9e\n\t.sleb128 2*6\n",
+ fcofs, CFRAME_SIZE);
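+    /* Each 0x80+reg / uleb128 pair below is a DW_CFA_offset rule recording */
+    /* where the callee-saved GPRs (and FPRs further down) were spilled. */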
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2*(30-i));
+#if !LJ_SOFTFP
+ for (i = 31; i >= 24; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 2*(46-i));
+#endif
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.4byte .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.byte 0x9f\n\t.uleb128 2*1\n"
+ "\t.byte 0x90\n\t.uleb128 2*2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ /* NYI */
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
| b ->vm_call_dispatch
|
|.ffunc xpcall
+ | lwz TMP1, L->maxstack
+ | add TMP2, BASE, NARGS8:RC
| cmplwi NARGS8:RC, 16
- | lwz CARG4, 8(BASE)
+ | lwz CARG3, 8(BASE)
+ | cmplw cr1, TMP1, TMP2
+ |.if FPU
| lfd FARG2, 8(BASE)
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
| lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG1, 0(BASE)
+ | lwz CARG2, 4(BASE)
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | lwz CARG4, 12(BASE)
+ |.endif
| blt ->fff_fallback
| lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH)
| mr TMP2, BASE
--- /dev/null
+|// Low-level VM code for x64 CPUs in LJ_GC64 mode.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch x64
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|//-----------------------------------------------------------------------
+|
+|.if WIN
+|.define X64WIN, 1 // Windows/x64 calling conventions.
+|.endif
+|
+|// Fixed register assignments for the interpreter.
+|// This is very fragile and has many dependencies. Caveat emptor.
+|.define BASE, rdx // Not C callee-save, refetched anyway.
+|.if X64WIN
+|.define KBASE, rdi // Must be C callee-save.
+|.define PC, rsi // Must be C callee-save.
+|.define DISPATCH, rbx // Must be C callee-save.
+|.define KBASEd, edi
+|.define PCd, esi
+|.define DISPATCHd, ebx
+|.else
+|.define KBASE, r15 // Must be C callee-save.
+|.define PC, rbx // Must be C callee-save.
+|.define DISPATCH, r14 // Must be C callee-save.
+|.define KBASEd, r15d
+|.define PCd, ebx
+|.define DISPATCHd, r14d
+|.endif
+|
+|.define RA, rcx
+|.define RAd, ecx
+|.define RAH, ch
+|.define RAL, cl
+|.define RB, rbp // Must be rbp (C callee-save).
+|.define RBd, ebp
+|.define RC, rax // Must be rax.
+|.define RCd, eax
+|.define RCW, ax
+|.define RCH, ah
+|.define RCL, al
+|.define OP, RBd
+|.define RD, RC
+|.define RDd, RCd
+|.define RDW, RCW
+|.define RDL, RCL
+|.define TMPR, r10
+|.define TMPRd, r10d
+|.define ITYPE, r11
+|.define ITYPEd, r11d
+|
+|.if X64WIN
+|.define CARG1, rcx // x64/WIN64 C call arguments.
+|.define CARG2, rdx
+|.define CARG3, r8
+|.define CARG4, r9
+|.define CARG1d, ecx
+|.define CARG2d, edx
+|.define CARG3d, r8d
+|.define CARG4d, r9d
+|.else
+|.define CARG1, rdi // x64/POSIX C call arguments.
+|.define CARG2, rsi
+|.define CARG3, rdx
+|.define CARG4, rcx
+|.define CARG5, r8
+|.define CARG6, r9
+|.define CARG1d, edi
+|.define CARG2d, esi
+|.define CARG3d, edx
+|.define CARG4d, ecx
+|.define CARG5d, r8d
+|.define CARG6d, r9d
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|//-----------------------------------------------------------------------
+|.if X64WIN // x64/Windows stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rdi; push rsi; push rbx
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+| pop rbx; pop rsi; pop rdi; pop rbp
+|.endmacro
+|
+|.define SAVE_CFRAME, aword [rsp+aword*13]
+|.define SAVE_PC, aword [rsp+aword*12]
+|.define SAVE_L, aword [rsp+aword*11]
+|.define SAVE_ERRF, dword [rsp+dword*21]
+|.define SAVE_NRES, dword [rsp+dword*20]
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.define ARG5, aword [rsp+aword*4]
+|.define CSAVE_4, aword [rsp+aword*3]
+|.define CSAVE_3, aword [rsp+aword*2]
+|.define CSAVE_2, aword [rsp+aword*1]
+|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee
+|
+|.define ARG5d, dword [rsp+dword*8]
+|.define TMP1, ARG5 // TMP1 overlaps ARG5
+|.define TMP1d, ARG5d
+|.define TMP1hi, dword [rsp+dword*9]
+|.define MULTRES, TMP1d // MULTRES overlaps TMP1d.
+|
+|//-----------------------------------------------------------------------
+|.else // x64/POSIX stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rbx; push r15; push r14
+|.if NO_UNWIND
+| push r13; push r12
+|.endif
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+|.if NO_UNWIND
+| pop r12; pop r13
+|.endif
+| pop r14; pop r15; pop rbx; pop rbp
+|.endmacro
+|
+|//----- 16 byte aligned,
+|.if NO_UNWIND
+|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*10]
+|.define SAVE_R3, aword [rsp+aword*9]
+|.define SAVE_R2, aword [rsp+aword*8]
+|.define SAVE_R1, aword [rsp+aword*7]
+|.define SAVE_RU2, aword [rsp+aword*6]
+|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.else
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.endif
+|.define SAVE_CFRAME, aword [rsp+aword*4]
+|.define SAVE_PC, aword [rsp+aword*3]
+|.define SAVE_L, aword [rsp+aword*2]
+|.define SAVE_ERRF, dword [rsp+dword*3]
+|.define SAVE_NRES, dword [rsp+dword*2]
+|.define TMP1, aword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned
+|
+|.define TMP1d, dword [rsp]
+|.define TMP1hi, dword [rsp+dword*1]
+|.define MULTRES, TMP1d // MULTRES overlaps TMP1d.
+|
+|.endif
+|
+|//-----------------------------------------------------------------------
+|
+|// Instruction headers.
+|.macro ins_A; .endmacro
+|.macro ins_AD; .endmacro
+|.macro ins_AJ; .endmacro
+|.macro ins_ABC; movzx RBd, RCH; movzx RCd, RCL; .endmacro
+|.macro ins_AB_; movzx RBd, RCH; .endmacro
+|.macro ins_A_C; movzx RCd, RCL; .endmacro
+|.macro ins_AND; not RD; .endmacro
+|
+|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster).
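+|// The 32 bit instruction at PC is loaded into RC: OP is the low byte,
+|// RA the second byte and RD (or RB:RC) the high 16 bits. Control then
+|// jumps through the per-opcode dispatch table at DISPATCH.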
+|.macro ins_NEXT
+| mov RCd, [PC]
+| movzx RAd, RCH
+| movzx OP, RCL
+| add PC, 4
+| shr RCd, 16
+| jmp aword [DISPATCH+OP*8]
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| // Around 10%-30% slower on Core2, a lot slower on P4.
+| .macro ins_next
+| jmp ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-8] = PC
+| mov PC, LFUNC:RB->pc
+| mov RAd, [PC]
+| movzx OP, RAL
+| movzx RAd, RAH
+| add PC, 4
+| jmp aword [DISPATCH+OP*8]
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC, RD = nargs+1
+| mov [BASE-8], PC
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to clear or set tags.
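+|// In LJ_GC64 mode a TValue keeps its payload in the low 47 bits and the
+|// type tag above it: cleartp drops the top 17 bits, settp ORs in tag<<47.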
+|.macro cleartp, reg; shl reg, 17; shr reg, 17; .endmacro
+|.macro settp, reg, tp
+| mov64 ITYPE, ((uint64_t)tp<<47)
+| or reg, ITYPE
+|.endmacro
+|.macro settp, dst, reg, tp
+| mov64 dst, ((uint64_t)tp<<47)
+| or dst, reg
+|.endmacro
+|.macro setint, reg
+| settp reg, LJ_TISNUM
+|.endmacro
+|.macro setint, dst, reg
+| settp dst, reg, LJ_TISNUM
+|.endmacro
+|
+|// Macros to test operand types.
+|.macro checktp_nc, reg, tp, target
+| mov ITYPE, reg
+| sar ITYPE, 47
+| cmp ITYPEd, tp
+| jne target
+|.endmacro
+|.macro checktp, reg, tp, target
+| mov ITYPE, reg
+| cleartp reg
+| sar ITYPE, 47
+| cmp ITYPEd, tp
+| jne target
+|.endmacro
+|.macro checktptp, src, tp, target
+| mov ITYPE, src
+| sar ITYPE, 47
+| cmp ITYPEd, tp
+| jne target
+|.endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro
+|
+|.macro checknumx, reg, target, jump
+| mov ITYPE, reg
+| sar ITYPE, 47
+| cmp ITYPEd, LJ_TISNUM
+| jump target
+|.endmacro
+|.macro checkint, reg, target; checknumx reg, target, jne; .endmacro
+|.macro checkinttp, src, target; checknumx src, target, jne; .endmacro
+|.macro checknum, reg, target; checknumx reg, target, jae; .endmacro
+|.macro checknumtp, src, target; checknumx src, target, jae; .endmacro
+|.macro checknumber, src, target; checknumx src, target, ja; .endmacro
+|
+|.macro mov_false, reg; mov64 reg, (int64_t)~((uint64_t)1<<47); .endmacro
+|.macro mov_true, reg; mov64 reg, (int64_t)~((uint64_t)2<<47); .endmacro
+|
+|// These operands must be used with movzx.
+|.define PC_OP, byte [PC-4]
+|.define PC_RA, byte [PC-3]
+|.define PC_RB, byte [PC-1]
+|.define PC_RC, byte [PC-2]
+|.define PC_RD, word [PC-2]
+|
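+|// Jump targets are stored biased by BCBIAS_J (0x8000) in the 16 bit RD
+|// operand; branchPC rescales to bytes and removes the bias in one lea.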
+|.macro branchPC, reg
+| lea PC, [PC+reg*4-BCBIAS_J*4]
+|.endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|// Decrement hashed hotcount and trigger trace recorder if zero.
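+|// The counters live in the GG_DISP2HOT table and are indexed by a hash of
+|// the current PC, so unrelated loops or calls may share a counter.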
+|.macro hotloop, reg
+| mov reg, PCd
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP
+| jb ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall, reg
+| mov reg, PCd
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL
+| jb ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state.
+|.macro set_vmstate, st
+| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st
+|.endmacro
+|
+|.macro fpop1; fstp st1; .endmacro
+|
+|// Synthesize SSE FP constants.
+|.macro sseconst_abs, reg, tmp // Synthesize abs mask.
+| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp
+|.endmacro
+|
+|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const.
+| mov64 tmp, U64x(val,00000000); movd reg, tmp
+|.endmacro
+|
+|.macro sseconst_sign, reg, tmp // Synthesize sign mask.
+| sseconst_hi reg, tmp, 80000000
+|.endmacro
+|.macro sseconst_1, reg, tmp // Synthesize 1.0.
+| sseconst_hi reg, tmp, 3ff00000
+|.endmacro
+|.macro sseconst_2p52, reg, tmp // Synthesize 2^52.
+| sseconst_hi reg, tmp, 43300000
+|.endmacro
+|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51.
+| sseconst_hi reg, tmp, 43380000
+|.endmacro
+|
+|// Move table write barrier back. Overwrites reg.
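+|// This is the backward barrier: the black table is turned gray again and
+|// linked onto gc.grayagain, so the incremental GC rescans it later.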
+|.macro barrierback, tab, reg
+| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab)
+| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)]
+| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab
+| mov tab->gclist, reg
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | test PCd, FRAME_P
+ | jz ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | and PC, -8
+ | sub BASE, PC // Restore caller base.
+ | lea RA, [RA+PC-8] // Rebase RA and prepend one result.
+ | mov PC, [BASE-8] // Fetch PC of previous frame.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | mov_true ITYPE
+ | mov aword [BASE+RA], ITYPE // Prepend true to results.
+ |
+ |->vm_returnc:
+ | add RDd, 1 // RD = nresults+1
+ | jz ->vm_unwind_yield
+ | mov MULTRES, RDd
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return
+ | xor PC, FRAME_C
+ | test PCd, FRAME_TYPE
+ | jnz ->vm_returnp
+ |
+ | // Return to C.
+ | set_vmstate C
+ | and PC, -8
+ | sub PC, BASE
+ | neg PC // Previous base = BASE - delta.
+ |
+ | sub RDd, 1
+ | jz >2
+ |1: // Move results down.
+ | mov RB, [BASE+RA]
+ | mov [BASE-16], RB
+ | add BASE, 8
+ | sub RDd, 1
+ | jnz <1
+ |2:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, PC
+ |3:
+ | mov RDd, MULTRES
+ | mov RAd, SAVE_NRES // RA = wanted nresults+1
+ |4:
+ | cmp RAd, RDd
+ | jne >6 // More/less results wanted?
+ |5:
+ | sub BASE, 16
+ | mov L:RB->top, BASE
+ |
+ |->vm_leave_cp:
+ | mov RA, SAVE_CFRAME // Restore previous C frame.
+ | mov L:RB->cframe, RA
+ | xor eax, eax // Ok return status for vm_pcall.
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | ret
+ |
+ |6:
+ | jb >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | cmp BASE, L:RB->maxstack
+ | ja >8
+ | mov aword [BASE-16], LJ_TNIL
+ | add BASE, 8
+ | add RDd, 1
+ | jmp <4
+ |
+ |7: // Less results wanted.
+ | test RAd, RAd
+ | jz <5 // But check for LUA_MULTRET+1.
+ | sub RA, RD // Negative result!
+ | lea BASE, [BASE+RA*8] // Correct top.
+ | jmp <5
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | mov L:RB->top, BASE // Save current top held in BASE (yes).
+ | mov MULTRES, RDd // Need to fill only remainder with nil.
+ | mov CARG2d, RAd
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->top // Need the (realloced) L->top in BASE.
+ | jmp <3
+ |
+ |->vm_unwind_yield:
+ | mov al, LUA_YIELD
+ | jmp ->vm_unwind_c_eh
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mov eax, CARG2d // Error return status for vm_pcall.
+ | mov rsp, CARG1
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov GL:RB, L:RB->glref
+ | mov dword GL:RB->vmstate, ~LJ_VMST_C
+ | jmp ->vm_leave_unw
+ |
+ |->vm_unwind_rethrow:
+ |.if not X64WIN
+ | mov CARG1, SAVE_L
+ | mov CARG2d, eax
+ | restoreregs
+ | jmp extern lj_err_throw // (lua_State *L, int errcode)
+ |.endif
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | and CARG1, CFRAME_RAWMASK
+ | mov rsp, CARG1
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov RDd, 1+1 // Really 1+2 results, incr. later.
+ | mov BASE, L:RB->base
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov PC, [BASE-8] // Fetch PC of previous frame.
+ | mov_false RA
+ | mov RB, [BASE]
+ | mov [BASE-16], RA // Prepend false to error message.
+ | mov [BASE-8], RB
+ | mov RA, -16 // Results start at BASE+RA = BASE-16.
+ | set_vmstate INTERP
+ | jmp ->vm_returnc // Increments RD/MULTRES and returns.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | mov CARG2d, LUA_MINSTACK
+ | jmp >2
+ |
+ |->vm_growstack_v: // Grow stack for vararg Lua function.
+ | sub RD, 16 // LJ_FR2
+ | jmp >1
+ |
+ |->vm_growstack_f: // Grow stack for fixarg Lua function.
+ | // BASE = new base, RD = nargs+1, RB = L, PC = first PC
+ | lea RD, [BASE+NARGS:RD*8-8]
+ |1:
+ | movzx RAd, byte [PC-4+PC2PROTO(framesize)]
+ | add PC, 4 // Must point after first instruction.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov SAVE_PC, PC
+ | mov CARG2, RA
+ |2:
+ | // RB = L, L->base = new base, L->top = top
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | sub RD, BASE
+ | shr RDd, 3
+ | add NARGS:RDd, 1
+ | // BASE = new base, RB = LFUNC, RD = nargs+1
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
+ | mov SAVE_L, CARG1
+ | mov RA, CARG2
+ | mov PCd, FRAME_CP
+ | xor RDd, RDd
+ | lea KBASE, [esp+CFRAME_RESUME]
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov SAVE_PC, RD // Any value outside of bytecode is ok.
+ | mov SAVE_CFRAME, RD
+ | mov SAVE_NRES, RDd
+ | mov SAVE_ERRF, RDd
+ | mov L:RB->cframe, KBASE
+ | cmp byte L:RB->status, RDL
+ | je >2 // Initial resume (like a call).
+ |
+ | // Resume after yield (like a return).
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | mov byte L:RB->status, RDL
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr RDd, 3
+ | add RDd, 1 // RD = nresults+1
+ | sub RA, BASE // RA = resultofs
+ | mov PC, [BASE-8]
+ | mov MULTRES, RDd
+ | test PCd, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PCd, FRAME_CP
+ | mov SAVE_ERRF, CARG4d
+ | jmp >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PCd, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | mov SAVE_NRES, CARG3d
+ | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
+ | mov SAVE_L, CARG1
+ | mov RA, CARG2
+ |
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASE
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ | add DISPATCH, GG_G2DISP
+ | mov L:RB->cframe, rsp
+ |
+ |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype).
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | mov BASE, L:RB->base // BASE = old base (used in vmeta_call).
+ | add PC, RA
+ | sub PC, BASE // PC = frame delta + frame type
+ |
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr NARGS:RDd, 3
+ | add NARGS:RDd, 1 // RD = nargs+1
+ |
+ |->vm_call_dispatch:
+ | mov LFUNC:RB, [RA-16]
+ | checkfunc LFUNC:RB, ->vmeta_call // Ensure KBASE defined and != BASE.
+ |
+ |->vm_call_dispatch_f:
+ | mov BASE, RA
+ | ins_call
+ | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
+ | mov SAVE_L, CARG1
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ |
+ | mov KBASE, L:RB->stack // Compute -savestack(L, L->top).
+ | sub KBASE, L:RB->top
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | mov SAVE_ERRF, 0 // No error function.
+ | mov SAVE_NRES, KBASEd // Neg. delta means cframe w/o frame.
+ | add DISPATCH, GG_G2DISP
+ | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
+ |
+ | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASE
+ | mov L:RB->cframe, rsp
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ |
+ | call CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | // TValue * (new base) or NULL returned in eax (RC).
+ | test RC, RC
+ | jz ->vm_leave_cp // No base? Just remove C frame.
+ | mov RA, RC
+ | mov PCd, FRAME_CP
+ | jmp <2 // Else continue with the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES)
+ | add RA, BASE
+ | and PC, -8
+ | mov RB, BASE
+ | sub BASE, PC // Restore caller BASE.
+ | mov aword [RA+RD*8-8], LJ_TNIL // Ensure one valid arg.
+ | mov RC, RA // ... in [RC]
+ | mov PC, [RB-24] // Restore PC from [cont|PC].
+ | mov RA, qword [RB-32] // May be negative on WIN64 with debug.
+ |.if FFI
+ | cmp RA, 1
+ | jbe >1
+ |.endif
+ | mov LFUNC:KBASE, [BASE-16]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | // BASE = base, RC = result, RB = meta base
+ | jmp RA // Jump to continuation.
+ |
+ |.if FFI
+ |1:
+ | je ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: Tail call from C function.
+ | sub RB, BASE
+ | shr RBd, 3
+ | lea RDd, [RBd-3]
+ | jmp ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // BASE = base, RC = result, RB = mbase
+ | movzx RAd, PC_RB
+ | sub RB, 32
+ | lea RA, [BASE+RA*8]
+ | sub RA, RB
+ | je ->cont_ra
+ | neg RA
+ | shr RAd, 3
+ |.if X64WIN
+ | mov CARG3d, RAd
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | mov RC, [RC]
+ | mov [RB], RC
+ | mov CARG2, RB
+ |.else
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | mov CARG3d, RAd
+ | mov RA, [RC]
+ | mov [RB], RA
+ | mov CARG2, RB
+ |.endif
+ | jmp ->BC_CAT_Z
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets:
+ | settp STR:RC, LJ_TSTR // STR:RC = GCstr *
+ | mov TMP1, STR:RC
+ | lea RC, TMP1
+ | cmp PC_OP, BC_GGET
+ | jne >1
+ | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab *
+ | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RB], TAB:RA
+ | jmp >2
+ |
+ |->vmeta_tgetb:
+ | movzx RCd, PC_RC
+ |.if DUALNUM
+ | setint RC
+ | mov TMP1, RC
+ |.else
+ | cvtsi2sd xmm0, RCd
+ | movsd TMP1, xmm0
+ |.endif
+ | lea RC, TMP1
+ | jmp >1
+ |
+ |->vmeta_tgetv:
+ | movzx RCd, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RBd, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, RB
+ | mov CARG3, RC
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ |->cont_ra: // BASE = base, RC = result
+ | movzx RAd, PC_RA
+ | mov RB, [RC]
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | mov RA, L:RB->top
+ | mov [RA-24], PC // [cont|PC]
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here.
+ | mov NARGS:RDd, 2+1 // 2 args for func(t, k).
+ | cleartp LFUNC:RB
+ | jmp ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | mov CARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov CARG2d, RCd // Caveat: CARG2 == BASE
+ | call extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RC).
+ | movzx RAd, PC_RA
+ | mov BASE, RB // Restore BASE.
+ | test RC, RC
+ | jnz ->BC_TGETR_Z
+ | mov ITYPE, LJ_TNIL
+ | jmp ->BC_TGETR2_Z
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets:
+ | settp STR:RC, LJ_TSTR // STR:RC = GCstr *
+ | mov TMP1, STR:RC
+ | lea RC, TMP1
+ | cmp PC_OP, BC_GSET
+ | jne >1
+ | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab *
+ | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RB], TAB:RA
+ | jmp >2
+ |
+ |->vmeta_tsetb:
+ | movzx RCd, PC_RC
+ |.if DUALNUM
+ | setint RC
+ | mov TMP1, RC
+ |.else
+ | cvtsi2sd xmm0, RCd
+ | movsd TMP1, xmm0
+ |.endif
+ | lea RC, TMP1
+ | jmp >1
+ |
+ |->vmeta_tsetv:
+ | movzx RCd, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RBd, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, RB
+ | mov CARG3, RC
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | movzx RAd, PC_RA
+ | mov RB, [BASE+RA*8]
+ | mov [RC], RB
+ |->cont_nop: // BASE = base, (RC = result)
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | mov RA, L:RB->top
+ | mov [RA-24], PC // [cont|PC]
+ | movzx RCd, PC_RA
+ | // Copy value to third argument.
+ | mov RB, [BASE+RC*8]
+ | mov [RA+16], RB
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here.
+ | mov NARGS:RDd, 3+1 // 3 args for func(t, k, v).
+ | cleartp LFUNC:RB
+ | jmp ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ |.if X64WIN
+ | mov L:CARG1, SAVE_L
+ | mov CARG3d, RCd
+ | mov L:CARG1->base, BASE
+ | xchg CARG2, TAB:RB // Caveat: CARG2 == BASE.
+ |.else
+ | mov L:CARG1, SAVE_L
+ | mov CARG2, TAB:RB
+ | mov L:CARG1->base, BASE
+ | mov RB, BASE // Save BASE.
+ | mov CARG3d, RCd // Caveat: CARG3 == BASE.
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // TValue * returned in eax (RC).
+ | movzx RAd, PC_RA
+ | mov BASE, RB // Restore BASE.
+ | jmp ->BC_TSETR_Z
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | movzx RDd, PC_RD
+ | movzx RAd, PC_RA
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2/CARG3 == BASE.
+ |.if X64WIN
+ | lea CARG3, [BASE+RD*8]
+ | lea CARG2, [BASE+RA*8]
+ |.else
+ | lea CARG2, [BASE+RA*8]
+ | lea CARG3, [BASE+RD*8]
+ |.endif
+ | mov CARG1, L:RB // Caveat: CARG1/CARG4 == RA.
+ | movzx CARG4d, PC_OP
+ | mov SAVE_PC, PC
+ | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ |3:
+ | mov BASE, L:RB->base
+ | cmp RC, 1
+ | ja ->vmeta_binop
+ |4:
+ | lea PC, [PC+4]
+ | jb >6
+ |5:
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |6:
+ | ins_next
+ |
+ |->cont_condt: // BASE = base, RC = result
+ | add PC, 4
+ | mov ITYPE, [RC]
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is true.
+ | jb <5
+ | jmp <6
+ |
+ |->cont_condf: // BASE = base, RC = result
+ | mov ITYPE, [RC]
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is false.
+ | jmp <4
+ |
+ |->vmeta_equal:
+ | cleartp TAB:RD
+ | sub PC, 4
+ |.if X64WIN
+ | mov CARG3, RD
+ | mov CARG4d, RBd
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2 == BASE.
+ | mov CARG2, RA
+ | mov CARG1, L:RB // Caveat: CARG1 == RA.
+ |.else
+ | mov CARG2, RA
+ | mov CARG4d, RBd // Caveat: CARG4 == RA.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG3 == BASE.
+ | mov CARG3, RD
+ | mov CARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, 4
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG1, L:RB
+ | mov CARG2d, dword [PC-4]
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal_cd // (lua_State *L, BCIns ins)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |.endif
+ |
+ |->vmeta_istype:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2d, RAd
+ | mov CARG3d, RDd
+ | mov L:CARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | mov BASE, L:RB->base
+ | jmp <6
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vno:
+ |.if DUALNUM
+ | movzx RBd, PC_RB
+ | movzx RCd, PC_RC
+ |.endif
+ |->vmeta_arith_vn:
+ | lea RC, [KBASE+RC*8]
+ | jmp >1
+ |
+ |->vmeta_arith_nvo:
+ |.if DUALNUM
+ | movzx RBd, PC_RB
+ | movzx RCd, PC_RC
+ |.endif
+ |->vmeta_arith_nv:
+ | lea TMPR, [KBASE+RC*8]
+ | lea RC, [BASE+RB*8]
+ | mov RB, TMPR
+ | jmp >2
+ |
+ |->vmeta_unm:
+ | lea RC, [BASE+RD*8]
+ | mov RB, RC
+ | jmp >2
+ |
+ |->vmeta_arith_vvo:
+ |.if DUALNUM
+ | movzx RBd, PC_RB
+ | movzx RCd, PC_RC
+ |.endif
+ |->vmeta_arith_vv:
+ | lea RC, [BASE+RC*8]
+ |1:
+ | lea RB, [BASE+RB*8]
+ |2:
+ | lea RA, [BASE+RA*8]
+ |.if X64WIN
+ | mov CARG3, RB
+ | mov CARG4, RC
+ | movzx RCd, PC_OP
+ | mov ARG5d, RCd
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2 == BASE.
+ | mov CARG2, RA
+ | mov CARG1, L:RB // Caveat: CARG1 == RA.
+ |.else
+ | movzx CARG5d, PC_OP
+ | mov CARG2, RA
+ | mov CARG4, RC // Caveat: CARG4 == RA.
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG3 == BASE.
+ | mov CARG3, RB
+ | mov L:RB, L:CARG1
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = base, RC = new base, stack = cont/func/o1/o2
+ | mov RA, RC
+ | sub RC, BASE
+ | mov [RA-24], PC // [cont|PC]
+ | lea PC, [RC+FRAME_CONT]
+ | mov NARGS:RDd, 2+1 // 2 args for func(o1, o2).
+ | jmp ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | movzx RDd, PC_RD
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | lea CARG2, [BASE+RD*8] // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_meta_len // (lua_State *L, TValue *o)
+ | // NULL (retry) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+#if LJ_52
+ | test RC, RC
+ | jne ->vmeta_binop // Binop call for compatibility.
+ | movzx RDd, PC_RD
+ | mov TAB:CARG1, [BASE+RD*8]
+ | cleartp TAB:CARG1
+ | jmp ->BC_LEN_Z
+#else
+ | jmp ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call_ra:
+ | lea RA, [BASE+RA*8+16]
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // BASE = old base, RA = new base, RC = nargs+1, PC = return
+ | mov TMP1d, NARGS:RDd // Save RA, RC for us.
+ | mov RB, RA
+ |.if X64WIN
+ | mov L:TMPR, SAVE_L
+ | mov L:TMPR->base, BASE // Caveat: CARG2 is BASE.
+ | lea CARG2, [RA-16]
+ | lea CARG3, [RA+NARGS:RD*8-8]
+ | mov CARG1, L:TMPR // Caveat: CARG1 is RA.
+ |.else
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG3 is BASE.
+ | lea CARG2, [RA-16]
+ | lea CARG3, [RA+NARGS:RD*8-8]
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | mov RA, RB
+ | mov L:RB, SAVE_L
+ | mov BASE, L:RB->base
+ | mov NARGS:RDd, TMP1d
+ | mov LFUNC:RB, [RA-16]
+ | add NARGS:RDd, 1
+ | // This is fragile. L->base must not move, KBASE must always be defined.
+ | cmp KBASE, BASE // Continue with CALLT if flag set.
+ | je ->BC_CALLT_Z
+ | cleartp LFUNC:RB
+ | mov BASE, RA
+ | ins_call // Otherwise call resolved metamethod.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, RA // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB // Caveat: CARG1 == RA
+ | mov SAVE_PC, PC
+ | call extern lj_meta_for // (lua_State *L, TValue *base)
+ | mov BASE, L:RB->base
+ | mov RCd, [PC-4]
+ | movzx RAd, RCH
+ | movzx OP, RCL
+ | shr RCd, 16
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmp NARGS:RDd, 1+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmp NARGS:RDd, 2+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name, op
+ | .ffunc_1 name
+ | checknumtp [BASE], ->fff_fallback
+ | op xmm0, qword [BASE]
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_n name, movsd
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | checknumtp [BASE], ->fff_fallback
+ | checknumtp [BASE+8], ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ | movsd xmm1, qword [BASE+8]
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses label 1.
+ |.macro ffgccheck
+ | mov RB, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | jb >1
+ | call ->fff_gcstep
+ |1:
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | mov ITYPE, [BASE]
+ | mov RB, ITYPE
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND; jae ->fff_fallback
+ | mov PC, [BASE-8]
+ | mov MULTRES, RDd
+ | mov RB, [BASE]
+ | mov [BASE-16], RB
+ | sub RDd, 2
+ | jz >2
+ | mov RA, BASE
+ |1:
+ | add RA, 8
+ | mov RB, [RA]
+ | mov [RA-16], RB
+ | sub RDd, 1
+ | jnz <1
+ |2:
+ | mov RDd, MULTRES
+ | jmp ->fff_res_
+ |
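+ |// type() maps the (inverted) tag to one of the type-name strings kept as
+ |// upvalues of this fast function, so no separate lookup table is needed;
+ |// number tags are first clamped to a single LJ_TISNUM entry.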
+ |.ffunc_1 type
+ | mov RC, [BASE]
+ | sar RC, 47
+ | mov RBd, LJ_TISNUM
+ | cmp RCd, RBd
+ | cmovb RCd, RBd
+ | not RCd
+ |2:
+ | mov CFUNC:RB, [BASE-16]
+ | cleartp CFUNC:RB
+ | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))]
+ | mov PC, [BASE-8]
+ | settp STR:RC, LJ_TSTR
+ | mov [BASE-16], STR:RC
+ | jmp ->fff_res1
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | mov TAB:RB, [BASE]
+ | mov PC, [BASE-8]
+ | checktab TAB:RB, >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | mov TAB:RB, TAB:RB->metatable
+ |2:
+ | test TAB:RB, TAB:RB
+ | mov aword [BASE-16], LJ_TNIL
+ | jz ->fff_res1
+ | settp TAB:RC, TAB:RB, LJ_TTAB
+ | mov [BASE-16], TAB:RC // Store metatable as default result.
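+ | // Inline lookup of the __metatable key: the interned string from gcroot
+ | // has its sid masked with hmask to pick the hash chain, which is walked
+ | // below; a non-nil value replaces the default result.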
+ | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+8*(GCROOT_MMNAME+MM_metatable)]
+ | mov RAd, TAB:RB->hmask
+ | and RAd, STR:RC->sid
+ | settp STR:RC, LJ_TSTR
+ | imul RAd, #NODE
+ | add NODE:RA, TAB:RB->node
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | cmp NODE:RA->key, STR:RC
+ | je >5
+ |4:
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <3
+ | jmp ->fff_res1 // Not found, keep default result.
+ |5:
+ | mov RB, NODE:RA->val
+ | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value.
+ | mov [BASE-16], RB // Return value of mt.__metatable.
+ | jmp ->fff_res1
+ |
+ |6:
+ | cmp ITYPEd, LJ_TUDATA; je <1
+ | cmp ITYPEd, LJ_TISNUM; ja >7
+ | mov ITYPEd, LJ_TISNUM
+ |7:
+ | not ITYPEd
+ | mov TAB:RB, [DISPATCH+ITYPE*8+DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | jmp <2
+ |
+ |.ffunc_2 setmetatable
+ | mov TAB:RB, [BASE]
+ | mov TAB:TMPR, TAB:RB
+ | checktab TAB:RB, ->fff_fallback
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
+ | mov TAB:RA, [BASE+8]
+ | checktab TAB:RA, ->fff_fallback
+ | mov TAB:RB->metatable, TAB:RA
+ | mov PC, [BASE-8]
+ | mov [BASE-16], TAB:TMPR // Return original table.
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jz >1
+ | // Possible write barrier. Table is black, but skip iswhite(mt) check.
+ | barrierback TAB:RB, RC
+ |1:
+ | jmp ->fff_res1
+ |
+ |.ffunc_2 rawget
+ |.if X64WIN
+ | mov TAB:RA, [BASE]
+ | checktab TAB:RA, ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ | lea CARG3, [BASE+8]
+ | mov CARG2, TAB:RA // Caveat: CARG2 == BASE.
+ | mov CARG1, SAVE_L
+ |.else
+ | mov TAB:CARG2, [BASE]
+ | checktab TAB:CARG2, ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE.
+ | mov CARG1, SAVE_L
+ |.endif
+ | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // cTValue * returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | // Copy table slot.
+ | mov RB, [RD]
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument.
+ | mov RB, [BASE]
+ | checknumber RB, ->fff_fallback
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | mov PC, [BASE-8]
+ | mov STR:RB, [BASE]
+ | checktp_nc STR:RB, LJ_TSTR, >3
+ | // A __tostring method in the string base metatable is ignored.
+ |2:
+ | mov [BASE-16], STR:RB
+ | jmp ->fff_res1
+ |3: // Handle numbers inline, unless a number base metatable is present.
+ | cmp ITYPEd, LJ_TISNUM; ja ->fff_fallback_1
+ | cmp aword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0
+ | jne ->fff_fallback
+ | ffgccheck // Caveat: uses label 1.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Add frame since C call can throw.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ |.if not X64WIN
+ | mov CARG2, BASE // Otherwise: CARG2 == BASE
+ |.endif
+ | mov L:CARG1, L:RB
+ |.if DUALNUM
+ | call extern lj_strfmt_number // (lua_State *L, cTValue *o)
+ |.else
+ | call extern lj_strfmt_num // (lua_State *L, lua_Number *np)
+ |.endif
+ | // GCstr returned in eax (RD).
+ | mov BASE, L:RB->base
+ | settp STR:RB, RD, LJ_TSTR
+ | jmp <2
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | je >2 // Missing 2nd arg?
+ |1:
+ | mov CARG1, [BASE]
+ | mov PC, [BASE-8]
+ | checktab CARG1, ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ |.if X64WIN
+ | lea CARG3, [BASE-16]
+ | lea CARG2, [BASE+8] // Caveat: CARG2 == BASE.
+ |.else
+ | lea CARG2, [BASE+8]
+ | lea CARG3, [BASE-16] // Caveat: CARG3 == BASE.
+ |.endif
+ | call extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // 1=found, 0=end, -1=error returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | test RDd, RDd; jg ->fff_res2 // Found key/value.
+ | js ->fff_fallback_2 // Invalid key.
+ | // End of traversal: return nil.
+ | mov aword [BASE-16], LJ_TNIL
+ | jmp ->fff_res1
+ |2: // Set missing 2nd arg to nil.
+ | mov aword [BASE+8], LJ_TNIL
+ | jmp <1
+ |
+ |.ffunc_1 pairs
+ | mov TAB:RB, [BASE]
+ | mov TMPR, TAB:RB
+ | checktab TAB:RB, ->fff_fallback
+#if LJ_52
+ | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RD, [BASE-16]
+ | cleartp CFUNC:RD
+ | mov CFUNC:RD, CFUNC:RD->upvalue[0]
+ | settp CFUNC:RD, LJ_TFUNC
+ | mov PC, [BASE-8]
+ | mov [BASE-16], CFUNC:RD
+ | mov [BASE-8], TMPR
+ | mov aword [BASE], LJ_TNIL
+ | mov RDd, 1+3
+ | jmp ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | mov TAB:RB, [BASE]
+ | checktab TAB:RB, ->fff_fallback
+ |.if DUALNUM
+ | mov RA, [BASE+8]
+ | checkint RA, ->fff_fallback
+ |.else
+ | checknumtp [BASE+8], ->fff_fallback
+ | movsd xmm0, qword [BASE+8]
+ |.endif
+ | mov PC, [BASE-8]
+ |.if DUALNUM
+ | add RAd, 1
+ | setint ITYPE, RA
+ | mov [BASE-16], ITYPE
+ |.else
+ | sseconst_1 xmm1, TMPR
+ | addsd xmm0, xmm1
+ | cvttsd2si RAd, xmm0
+ | movsd qword [BASE-16], xmm0
+ |.endif
+ | cmp RAd, TAB:RB->asize; jae >2 // Not in array part?
+ | mov RD, TAB:RB->array
+ | lea RD, [RD+RA*8]
+ |1:
+ | cmp aword [RD], LJ_TNIL; je ->fff_res0
+ | // Copy array slot.
+ | mov RB, [RD]
+ | mov [BASE-8], RB
+ |->fff_res2:
+ | mov RDd, 1+2
+ | jmp ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | cmp dword TAB:RB->hmask, 0; je ->fff_res0
+ |.if X64WIN
+ | mov TMPR, BASE
+ | mov CARG2d, RAd
+ | mov CARG1, TAB:RB
+ | mov RB, TMPR
+ |.else
+ | mov CARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov CARG2d, RAd // Caveat: CARG2 == BASE
+ |.endif
+ | call extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RD).
+ | mov BASE, RB
+ | test RD, RD
+ | jnz <1
+ |->fff_res0:
+ | mov RDd, 1+0
+ | jmp ->fff_res
+ |
+ |.ffunc_1 ipairs
+ | mov TAB:RB, [BASE]
+ | mov TMPR, TAB:RB
+ | checktab TAB:RB, ->fff_fallback
+#if LJ_52
+ | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RD, [BASE-16]
+ | cleartp CFUNC:RD
+ | mov CFUNC:RD, CFUNC:RD->upvalue[0]
+ | settp CFUNC:RD, LJ_TFUNC
+ | mov PC, [BASE-8]
+ | mov [BASE-16], CFUNC:RD
+ | mov [BASE-8], TMPR
+ |.if DUALNUM
+ | mov64 RD, ((uint64_t)LJ_TISNUM<<47)
+ | mov [BASE], RD
+ |.else
+ | mov qword [BASE], 0
+ |.endif
+ | mov RDd, 1+3
+ | jmp ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
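+ |// PC is preloaded with the frame link for the new frame: the byte delta to
+ |// the caller plus FRAME_PCALL. Adding the active-hook bit below turns this
+ |// into FRAME_PCALLH, so the hook state can be restored during unwinding.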
+ |.ffunc_1 pcall
+ | mov L:RB, SAVE_L
+ | lea RA, [BASE+NARGS:RD*8]
+ | cmp RA, L:RB->maxstack; ja ->fff_fallback
+ | lea RA, [BASE+16]
+ | sub NARGS:RDd, 1
+ | mov PCd, 16+FRAME_PCALL
+ |1:
+ | movzx RBd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | shr RB, HOOK_ACTIVE_SHIFT
+ | and RB, 1
+ | add PC, RB // Remember active hook before pcall.
+ | // Note: this does a (harmless) copy of the function to the PC slot, too.
+ | mov KBASE, RD
+ |2:
+ | mov RB, [RA+KBASE*8-24]
+ | mov [RA+KBASE*8-16], RB
+ | sub KBASE, 1
+ | ja <2
+ | jmp ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | mov L:RB, SAVE_L
+ | lea RA, [BASE+NARGS:RD*8]
+ | cmp RA, L:RB->maxstack; ja ->fff_fallback
+ | mov LFUNC:RA, [BASE+8]
+ | checktp_nc LFUNC:RA, LJ_TFUNC, ->fff_fallback
+ | mov LFUNC:RB, [BASE] // Swap function and traceback.
+ | mov [BASE], LFUNC:RA
+ | mov [BASE+8], LFUNC:RB
+ | lea RA, [BASE+24]
+ | sub NARGS:RDd, 2
+ | mov PCd, 24+FRAME_PCALL
+ | jmp <1
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
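+ |// Shared code for coroutine.resume and the coroutine.wrap closure: the
+ |// arguments are copied onto the coroutine stack, ->vm_resume is called,
+ |// and the results are copied back. Only resume prepends a true/false
+ |// status; wrap rethrows the error instead (see label 8 below).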
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | mov L:RB, [BASE]
+ | cleartp L:RB
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | mov CFUNC:RB, [BASE-16]
+ | cleartp CFUNC:RB
+ | mov L:RB, CFUNC:RB->upvalue[0].gcr
+ | cleartp L:RB
+ |.endif
+ | mov PC, [BASE-8]
+ | mov SAVE_PC, PC
+ | mov TMP1, L:RB
+ |.if resume
+ | checktptp [BASE], LJ_TTHREAD, ->fff_fallback
+ |.endif
+ | cmp aword L:RB->cframe, 0; jne ->fff_fallback
+ | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback
+ | mov RA, L:RB->top
+ | je >1 // Status != LUA_YIELD (i.e. 0)?
+ | cmp RA, L:RB->base // Check for presence of initial func.
+ | je ->fff_fallback
+ | mov PC, [RA-8] // Move initial function up.
+ | mov [RA], PC
+ | add RA, 8
+ |1:
+ |.if resume
+ | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread).
+ |.else
+ | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1).
+ |.endif
+ | cmp PC, L:RB->maxstack; ja ->fff_fallback
+ | mov L:RB->top, PC
+ |
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ |.if resume
+ | add BASE, 8 // Keep resumed thread in stack for GC.
+ |.endif
+ | mov L:RB->top, BASE
+ |.if resume
+ | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move.
+ |.else
+ | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move.
+ |.endif
+ | sub RB, PC // Relative to PC.
+ |
+ | cmp PC, RA
+ | je >3
+ |2: // Move args to coroutine.
+ | mov RC, [PC+RB]
+ | mov [PC-8], RC
+ | sub PC, 8
+ | cmp PC, RA
+ | jne <2
+ |3:
+ | mov CARG2, RA
+ | mov CARG1, TMP1
+ | call ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |
+ | mov L:RB, SAVE_L
+ | mov L:PC, TMP1
+ | mov BASE, L:RB->base
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ |
+ | cmp eax, LUA_YIELD
+ | ja >8
+ |4:
+ | mov RA, L:PC->base
+ | mov KBASE, L:PC->top
+ | mov L:PC->top, RA // Clear coroutine stack.
+ | mov PC, KBASE
+ | sub PC, RA
+ | je >6 // No results?
+ | lea RD, [BASE+PC]
+ | shr PCd, 3
+ | cmp RD, L:RB->maxstack
+ | ja >9 // Need to grow stack?
+ |
+ | mov RB, BASE
+ | sub RB, RA
+ |5: // Move results from coroutine.
+ | mov RD, [RA]
+ | mov [RA+RB], RD
+ | add RA, 8
+ | cmp RA, KBASE
+ | jne <5
+ |6:
+ |.if resume
+ | lea RDd, [PCd+2] // nresults+1 = 1 + true + results.
+ | mov_true ITYPE // Prepend true to results.
+ | mov [BASE-8], ITYPE
+ |.else
+ | lea RDd, [PCd+1] // nresults+1 = 1 + results.
+ |.endif
+ |7:
+ | mov PC, SAVE_PC
+ | mov MULTRES, RDd
+ |.if resume
+ | mov RA, -8
+ |.else
+ | xor RAd, RAd
+ |.endif
+ | test PCd, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | mov_false ITYPE // Prepend false to results.
+ | mov [BASE-8], ITYPE
+ | mov RA, L:PC->top
+ | sub RA, 8
+ | mov L:PC->top, RA // Clear error from coroutine stack.
+ | // Copy error message.
+ | mov RD, [RA]
+ | mov [BASE], RD
+ | mov RDd, 1+2 // nresults+1 = 1 + false + error.
+ | jmp <7
+ |.else
+ | mov CARG2, L:PC
+ | mov CARG1, L:RB
+ | call extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Error function does not return.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov L:RA, TMP1
+ | mov L:RA->top, KBASE // Undo coroutine stack clearing.
+ | mov CARG2, PC
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov L:PC, TMP1
+ | mov BASE, L:RB->base
+ | jmp <4 // Retry the stack move.
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | mov L:RB, SAVE_L
+ | test aword L:RB->cframe, CFRAME_RESUME
+ | jz ->fff_fallback
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->top, RD
+ | xor RDd, RDd
+ | mov aword L:RB->cframe, RD
+ | mov al, LUA_YIELD
+ | mov byte L:RB->status, al
+ | jmp ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ | .ffunc_1 math_abs
+ | mov RB, [BASE]
+ |.if DUALNUM
+ | checkint RB, >3
+ | cmp RBd, 0; jns ->fff_resi
+ | neg RBd; js >2
+ |->fff_resbit:
+ |->fff_resi:
+ | setint RB
+ |->fff_resRB:
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |2:
+ | mov64 RB, U64x(41e00000,00000000) // 2^31.
+ | jmp ->fff_resRB
+ |3:
+ | ja ->fff_fallback
+ |.else
+ | checknum RB, ->fff_fallback
+ |.endif
+ | shl RB, 1
+ | shr RB, 1
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |
+ |.ffunc_n math_sqrt, sqrtsd
+ |->fff_resxmm0:
+ | mov PC, [BASE-8]
+ | movsd qword [BASE-16], xmm0
+ | // fallthrough
+ |
+ |->fff_res1:
+ | mov RDd, 1+1
+ |->fff_res:
+ | mov MULTRES, RDd
+ |->fff_res_:
+ | test PCd, FRAME_TYPE
+ | jnz >7
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | movzx RAd, PC_RA
+ | neg RA
+ | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | mov aword [BASE+RD*8-24], LJ_TNIL
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | mov RA, -16 // Results start at BASE+RA = BASE-16.
+ | jmp ->vm_return
+ |
+ |.macro math_round, func
+ | .ffunc math_ .. func
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checknumx RB, ->fff_resRB, je
+ | ja ->fff_fallback
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ |.endif
+ | movsd xmm0, qword [BASE]
+ | call ->vm_ .. func .. _sse
+ |.if DUALNUM
+ | cvttsd2si RBd, xmm0
+ | cmp RBd, 0x80000000
+ | jne ->fff_resi
+ | cvtsi2sd xmm1, RBd
+ | ucomisd xmm0, xmm1
+ | jp ->fff_resxmm0
+ | je ->fff_resi
+ |.endif
+ | jmp ->fff_resxmm0
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc math_log
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument.
+ | checknumtp [BASE], ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ | mov RB, BASE
+ | call extern log
+ | mov BASE, RB
+ | jmp ->fff_resxmm0
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | mov RB, BASE
+ | call extern func
+ | mov BASE, RB
+ | jmp ->fff_resxmm0
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | mov RB, BASE
+ | call extern func
+ | mov BASE, RB
+ | jmp ->fff_resxmm0
+ |.endmacro
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_2 math_ldexp
+ | checknumtp [BASE], ->fff_fallback
+ | checknumtp [BASE+8], ->fff_fallback
+ | fld qword [BASE+8]
+ | fld qword [BASE]
+ | fscale
+ | fpop1
+ | mov PC, [BASE-8]
+ | fstp qword [BASE-16]
+ | jmp ->fff_res1
+ |
+ |.ffunc_n math_frexp
+ | mov RB, BASE
+ |.if X64WIN
+ | lea CARG2, TMP1 // Caveat: CARG2 == BASE
+ |.else
+ | lea CARG1, TMP1
+ |.endif
+ | call extern frexp
+ | mov BASE, RB
+ | mov RBd, TMP1d
+ | mov PC, [BASE-8]
+ | movsd qword [BASE-16], xmm0
+ |.if DUALNUM
+ | setint RB
+ | mov [BASE-8], RB
+ |.else
+ | cvtsi2sd xmm1, RBd
+ | movsd qword [BASE-8], xmm1
+ |.endif
+ | mov RDd, 1+2
+ | jmp ->fff_res
+ |
+ |.ffunc_n math_modf
+ | mov RB, BASE
+ |.if X64WIN
+ | lea CARG2, [BASE-16] // Caveat: CARG2 == BASE
+ |.else
+ | lea CARG1, [BASE-16]
+ |.endif
+ | call extern modf
+ | mov BASE, RB
+ | mov PC, [BASE-8]
+ | movsd qword [BASE-8], xmm0
+ | mov RDd, 1+2
+ | jmp ->fff_res
+ |
+ |.macro math_minmax, name, cmovop, sseop
+ | .ffunc_1 name
+ | mov RAd, 2
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checkint RB, >4
+ |1: // Handle integers.
+ | cmp RAd, RDd; jae ->fff_resRB
+ | mov TMPR, [BASE+RA*8-8]
+ | checkint TMPR, >3
+ | cmp RBd, TMPRd
+ | cmovop RB, TMPR
+ | add RAd, 1
+ | jmp <1
+ |3:
+ | ja ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ | cvtsi2sd xmm0, RBd
+ | jmp >6
+ |4:
+ | ja ->fff_fallback
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ |.endif
+ |
+ | movsd xmm0, qword [BASE]
+ |5: // Handle numbers or integers.
+ | cmp RAd, RDd; jae ->fff_resxmm0
+ |.if DUALNUM
+ | mov RB, [BASE+RA*8-8]
+ | checknumx RB, >6, jb
+ | ja ->fff_fallback
+ | cvtsi2sd xmm1, RBd
+ | jmp >7
+ |.else
+ | checknumtp [BASE+RA*8-8], ->fff_fallback
+ |.endif
+ |6:
+ | movsd xmm1, qword [BASE+RA*8-8]
+ |7:
+ | sseop xmm0, xmm1
+ | add RAd, 1
+ | jmp <5
+ |.endmacro
+ |
+ | math_minmax math_min, cmovg, minsd
+ | math_minmax math_max, cmovl, maxsd
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ | checkstr STR:RB, ->fff_fallback
+ | mov PC, [BASE-8]
+ | cmp dword STR:RB->len, 1
+ | jb ->fff_res0 // Return no results for empty string.
+ | movzx RBd, byte STR:RB[1]
+ |.if DUALNUM
+ | jmp ->fff_resi
+ |.else
+ | cvtsi2sd xmm0, RBd; jmp ->fff_resxmm0
+ |.endif
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback // *Exactly* 1 arg.
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checkint RB, ->fff_fallback
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ | cvttsd2si RBd, qword [BASE]
+ |.endif
+ | cmp RBd, 255; ja ->fff_fallback
+ | mov TMP1d, RBd
+ | mov TMPRd, 1
+ | lea RD, TMP1 // Points to stack. Little-endian.
+ |->fff_newstr:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG3d, TMPRd // Zero-extended to size_t.
+ | mov CARG2, RD
+ | mov CARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // GCstr * returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov PC, [BASE-8]
+ | settp STR:RD, LJ_TSTR
+ | mov [BASE-16], STR:RD
+ | jmp ->fff_res1
+ |
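+ |// string.sub index handling follows the Lua semantics: a missing end
+ |// defaults to -1, negative indices count from the end (len+i+1), start is
+ |// clamped to 1 and end to len, and start > end yields the empty string.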
+ |.ffunc string_sub
+ | ffgccheck
+ | mov TMPRd, -1
+ | cmp NARGS:RDd, 1+2; jb ->fff_fallback
+ | jna >1
+ |.if DUALNUM
+ | mov TMPR, [BASE+16]
+ | checkint TMPR, ->fff_fallback
+ |.else
+ | checknumtp [BASE+16], ->fff_fallback
+ | cvttsd2si TMPRd, qword [BASE+16]
+ |.endif
+ |1:
+ | mov STR:RB, [BASE]
+ | checkstr STR:RB, ->fff_fallback
+ |.if DUALNUM
+ | mov ITYPE, [BASE+8]
+ | mov RAd, ITYPEd // Must clear hiword for lea below.
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISNUM
+ | jne ->fff_fallback
+ |.else
+ | checknumtp [BASE+8], ->fff_fallback
+ | cvttsd2si RAd, qword [BASE+8]
+ |.endif
+ | mov RCd, STR:RB->len
+ | cmp RCd, TMPRd // len < end? (unsigned compare)
+ | jb >5
+ |2:
+ | test RAd, RAd // start <= 0?
+ | jle >7
+ |3:
+ | sub TMPRd, RAd // start > end?
+ | jl ->fff_emptystr
+ | lea RD, [STR:RB+RA+#STR-1]
+ | add TMPRd, 1
+ |4:
+ | jmp ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | jl >6
+ | lea TMPRd, [TMPRd+RCd+1] // end = end+(len+1)
+ | jmp <2
+ |6: // Overflow.
+ | mov TMPRd, RCd // end = len
+ | jmp <2
+ |
+ |7: // Negative start or underflow.
+ | je >8
+ | add RAd, RCd // start = start+(len+1)
+ | add RAd, 1
+ | jg <3 // start > 0?
+ |8: // Underflow.
+ | mov RAd, 1 // start = 1
+ | jmp <3
+ |
+ |->fff_emptystr: // Range underflow.
+ | xor TMPRd, TMPRd // Zero length. Any ptr in RD is ok.
+ | jmp <4
+ |
+ |.macro ffstring_op, name
+ | .ffunc_1 string_ .. name
+ | ffgccheck
+ |.if X64WIN
+ | mov STR:TMPR, [BASE]
+ | checkstr STR:TMPR, ->fff_fallback
+ |.else
+ | mov STR:CARG2, [BASE]
+ | checkstr STR:CARG2, ->fff_fallback
+ |.endif
+ | mov L:RB, SAVE_L
+ | lea SBUF:CARG1, [DISPATCH+DISPATCH_GL(tmpbuf)]
+ | mov L:RB->base, BASE
+ |.if X64WIN
+ | mov STR:CARG2, STR:TMPR // Caveat: CARG2 == BASE
+ |.endif
+ | mov RC, SBUF:CARG1->b
+ | mov SBUF:CARG1->L, L:RB
+ | mov SBUF:CARG1->w, RC
+ | mov SAVE_PC, PC
+ | call extern lj_buf_putstr_ .. name
+ | mov CARG1, rax
+ | call extern lj_buf_tostr
+ | jmp ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
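+ |// Number arguments are converted with the usual bias trick: adding
+ |// 2^52+2^51 (sseconst_tobit) pins the exponent, so the low 32 bits of the
+ |// mantissa hold the operand (rounded) modulo 2^32 and movd extracts them.
+ |// Roughly, in C: u.d = x + 0x1.8p52; bits = (int32_t)u.u64;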
+ |.macro .ffunc_bit, name, kind, fdef
+ | fdef name
+ |.if kind == 2
+ | sseconst_tobit xmm1, RB
+ |.endif
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checkint RB, >1
+ |.if kind > 0
+ | jmp >2
+ |.else
+ | jmp ->fff_resbit
+ |.endif
+ |1:
+ | ja ->fff_fallback
+ | movd xmm0, RB
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ |.endif
+ |.if kind < 2
+ | sseconst_tobit xmm1, RB
+ |.endif
+ | addsd xmm0, xmm1
+ | movd RBd, xmm0
+ |2:
+ |.endmacro
+ |
+ |.macro .ffunc_bit, name, kind
+ | .ffunc_bit name, kind, .ffunc_1
+ |.endmacro
+ |
+ |.ffunc_bit bit_tobit, 0
+ | jmp ->fff_resbit
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name, 2
+ | mov TMPRd, NARGS:RDd // Save for fallback.
+ | lea RD, [BASE+NARGS:RD*8-16]
+ |1:
+ | cmp RD, BASE
+ | jbe ->fff_resbit
+ |.if DUALNUM
+ | mov RA, [RD]
+ | checkint RA, >2
+ | ins RBd, RAd
+ | sub RD, 8
+ | jmp <1
+ |2:
+ | ja ->fff_fallback_bit_op
+ | movd xmm0, RA
+ |.else
+ | checknumtp [RD], ->fff_fallback_bit_op
+ | movsd xmm0, qword [RD]
+ |.endif
+ | addsd xmm0, xmm1
+ | movd RAd, xmm0
+ | ins RBd, RAd
+ | sub RD, 8
+ | jmp <1
+ |.endmacro
+ |
+ |.ffunc_bit_op bit_band, and
+ |.ffunc_bit_op bit_bor, or
+ |.ffunc_bit_op bit_bxor, xor
+ |
+ |.ffunc_bit bit_bswap, 1
+ | bswap RBd
+ | jmp ->fff_resbit
+ |
+ |.ffunc_bit bit_bnot, 1
+ | not RBd
+ |.if DUALNUM
+ | jmp ->fff_resbit
+ |.else
+ |->fff_resbit:
+ | cvtsi2sd xmm0, RBd
+ | jmp ->fff_resxmm0
+ |.endif
+ |
+ |->fff_fallback_bit_op:
+ | mov NARGS:RDd, TMPRd // Restore for fallback
+ | jmp ->fff_fallback
+ |
+ |.macro .ffunc_bit_sh, name, ins
+ |.if DUALNUM
+ | .ffunc_bit name, 1, .ffunc_2
+ | // Note: no inline conversion from number for 2nd argument!
+ | mov RA, [BASE+8]
+ | checkint RA, ->fff_fallback
+ |.else
+ | .ffunc_nn name
+ | sseconst_tobit xmm2, RB
+ | addsd xmm0, xmm2
+ | addsd xmm1, xmm2
+ | movd RBd, xmm0
+ | movd RAd, xmm1
+ |.endif
+ | ins RBd, cl // Assumes RA is ecx.
+ | jmp ->fff_resbit
+ |.endmacro
+ |
+ |.ffunc_bit_sh bit_lshift, shl
+ |.ffunc_bit_sh bit_rshift, shr
+ |.ffunc_bit_sh bit_arshift, sar
+ |.ffunc_bit_sh bit_rol, rol
+ |.ffunc_bit_sh bit_ror, ror
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback_2:
+ | mov NARGS:RDd, 1+2 // Other args are ignored, anyway.
+ | jmp ->fff_fallback
+ |->fff_fallback_1:
+ | mov NARGS:RDd, 1+1 // Other args are ignored, anyway.
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RD = nargs+1
+ | mov L:RB, SAVE_L
+ | mov PC, [BASE-8] // Fallback may overwrite PC.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler.
+ | mov L:RB->top, RD
+ | mov CFUNC:RD, [BASE-16]
+ | cleartp CFUNC:RD
+ | cmp RA, L:RB->maxstack
+ | ja >5 // Need to grow stack.
+ | mov CARG1, L:RB
+ | call aword CFUNC:RD->f // (lua_State *L)
+ | mov BASE, L:RB->base
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | test RDd, RDd; jg ->fff_res // Returned nresults+1?
+ |1:
+ | mov RA, L:RB->top
+ | sub RA, BASE
+ | shr RAd, 3
+ | test RDd, RDd
+ | lea NARGS:RDd, [RAd+1]
+ | mov LFUNC:RB, [BASE-16]
+ | jne ->vm_call_tail // Returned -1?
+ | cleartp LFUNC:RB
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
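+ |// Two cases: a Lua frame (type bits 0) recomputes the previous base from
+ |// the RA operand of the calling instruction; any other frame stores its
+ |// byte offset in the frame link, which is masked and subtracted instead.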
+ |->vm_call_tail:
+ | mov RA, BASE
+ | test PCd, FRAME_TYPE
+ | jnz >3
+ | movzx RBd, PC_RA
+ | neg RB
+ | lea BASE, [BASE+RB*8-16] // base = base - (RB+2)*8
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |3:
+ | mov RB, PC
+ | and RB, -8
+ | sub BASE, RB
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2d, LUA_MINSTACK
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | xor RDd, RDd // Simulate a return 0.
+ | jmp <1 // Dumb retry (goes through ff first).
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RD = nargs+1
+ | pop RB // Must keep stack at same level.
+ | mov TMP1, RB // Save return address
+ | mov L:RB, SAVE_L
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov CARG1, L:RB
+ | mov L:RB->top, RD
+ | call extern lj_gc_step // (lua_State *L)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | shr RDd, 3
+ | add NARGS:RDd, 1
+ | mov RB, TMP1
+ | push RB // Restore return address.
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_VMEVENT // No recording while in vmevent.
+ | jnz >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | test RDL, HOOK_ACTIVE
+ | jnz >1
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >1
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jmp >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ | jmp >1
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ |
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >5
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jz >1
+ | test RDL, LUA_MASKLINE
+ | jz >5
+ |1:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, PC // Caveat: CARG2 == BASE
+ | mov CARG1, L:RB
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | mov BASE, L:RB->base
+ |4:
+ | movzx RAd, PC_RA
+ |5:
+ | movzx OP, PC_OP
+ | movzx RDd, PC_RD
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins.
+ |
+ |->cont_hook: // Continue from hook yield.
+ | add PC, 4
+ | mov RA, [RB-40]
+ | mov MULTRES, RAd // Restore MULTRES for *M ins.
+ | jmp <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | mov LFUNC:RB, [BASE-16] // Same as curr_topL(L).
+ | cleartp LFUNC:RB
+ | mov RB, LFUNC:RB->pc
+ | movzx RDd, byte [RB+PC2PROTO(framesize)]
+ | lea RD, [BASE+RD*8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov CARG2, PC
+ | lea CARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | jmp <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov SAVE_PC, PC
+ |.if JIT
+ | jmp >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | mov SAVE_PC, PC
+ | or PC, 1 // Marker for hot call.
+ |1:
+ |.endif
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov CARG2, PC
+ | mov CARG1, L:RB
+ | call extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // ASMFunction returned in eax/rax (RD).
+ | mov SAVE_PC, 0 // Invalidate for subsequent line hook.
+ |.if JIT
+ | and PC, -2
+ |.endif
+ | mov BASE, L:RB->base
+ | mov RA, RD
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | mov RB, RA
+ | movzx RAd, PC_RA
+ | shr RDd, 3
+ | add NARGS:RDd, 1
+ | jmp RB
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // BASE = base, RC = result, RB = mbase
+ | mov TRACE:ITYPE, [RB-40] // Save previous trace.
+ | cleartp TRACE:ITYPE
+ | mov TMPRd, MULTRES
+ | movzx RAd, PC_RA
+ | lea RA, [BASE+RA*8] // Call base.
+ | sub TMPRd, 1
+ | jz >2
+ |1: // Move results down.
+ | mov RB, [RC]
+ | mov [RA], RB
+ | add RC, 8
+ | add RA, 8
+ | sub TMPRd, 1
+ | jnz <1
+ |2:
+ | movzx RCd, PC_RA
+ | movzx RBd, PC_RB
+ | add RC, RB
+ | lea RC, [BASE+RC*8-8]
+ |3:
+ | cmp RC, RA
+ | ja >9 // More results wanted?
+ |
+ | test TRACE:ITYPE, TRACE:ITYPE
+ | jz ->cont_nop
+ | movzx RBd, word TRACE:ITYPE->traceno
+ | movzx RDd, word TRACE:ITYPE->link
+ | cmp RDd, RBd
+ | je ->cont_nop // Blacklisted.
+ | test RDd, RDd
+ | jne =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | mov [DISPATCH+DISPATCH_J(exitno)], RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, PC
+ | lea CARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
+ | call extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ | mov BASE, L:RB->base
+ | jmp ->cont_nop
+ |
+ |9: // Fill up results with nil.
+ | mov aword [RA], LJ_TNIL
+ | add RA, 8
+ | jmp <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, PC // Caveat: CARG2 == BASE
+ | mov CARG1, L:RB
+ | call extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ | mov BASE, L:RB->base
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | sub PC, 4
+ | jmp ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Called from an exit stub with the exit number on the stack.
+ |// The 16 bit exit number is stored with two (sign-extended) push imm8.
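+ |// The handler re-materializes it below by loading one byte from each of
+ |// the two pushed slots ([rbp-8] and [rbp-16]) into the low and high byte
+ |// of RCd, then reuses those slots to spill r15/r14.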
+ |->vm_exit_handler:
+ |.if JIT
+ | push r13; push r12
+ | push r11; push r10; push r9; push r8
+ | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp
+ | push rbx; push rdx; push rcx; push rax
+ | movzx RCd, byte [rbp-8] // Reconstruct exit number.
+ | mov RCH, byte [rbp-16]
+ | mov [rbp-8], r15; mov [rbp-16], r14
+ | // DISPATCH is preserved on-trace in LJ_GC64 mode.
+ | mov RAd, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number.
+ | set_vmstate EXIT
+ | mov [DISPATCH+DISPATCH_J(exitno)], RCd
+ | mov [DISPATCH+DISPATCH_J(parent)], RAd
+ |.if X64WIN
+ | sub rsp, 16*8+4*8 // Room for SSE regs + save area.
+ |.else
+ | sub rsp, 16*8 // Room for SSE regs.
+ |.endif
+ | add rbp, -128
+ | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14
+ | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12
+ | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10
+ | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8
+ | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6
+ | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4
+ | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2
+ | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0
+ | // Caveat: RB is rbp.
+ | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)]
+ | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
+ | mov L:RB->base, BASE
+ |.if X64WIN
+ | lea CARG2, [rsp+4*8]
+ |.else
+ | mov CARG2, rsp
+ |.endif
+ | lea CARG1, [DISPATCH+GG_DISP2J]
+ | mov qword [DISPATCH+DISPATCH_GL(jit_base)], 0
+ | call extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // MULTRES or negated error code returned in eax (RD).
+ | mov RA, L:RB->cframe
+ | and RA, CFRAME_RAWMASK
+ | mov [RA+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield).
+ | mov BASE, L:RB->base
+ | mov PC, [RA+CFRAME_OFS_PC] // Get SAVE_PC.
+ | jmp >1
+ |.endif
+ |->vm_exit_interp:
+ | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set.
+ |.if JIT
+ | // Restore additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | lea RA, [rsp+10*16+4*8]
+ |1:
+ | movdqa xmm15, [RA-10*16]
+ | movdqa xmm14, [RA-9*16]
+ | movdqa xmm13, [RA-8*16]
+ | movdqa xmm12, [RA-7*16]
+ | movdqa xmm11, [RA-6*16]
+ | movdqa xmm10, [RA-5*16]
+ | movdqa xmm9, [RA-4*16]
+ | movdqa xmm8, [RA-3*16]
+ | movdqa xmm7, [RA-2*16]
+ | mov rsp, RA // Reposition stack to C frame.
+ | movdqa xmm6, [RA-1*16]
+ | mov r15, CSAVE_1
+ | mov r14, CSAVE_2
+ | mov r13, CSAVE_3
+ | mov r12, CSAVE_4
+ |.else
+ | lea RA, [rsp+16]
+ |1:
+ | mov r13, [RA-8]
+ | mov r12, [RA]
+ | mov rsp, RA // Reposition stack to C frame.
+ |.endif
+ | cmp RDd, -LUA_ERRERR; jae >9 // Check for error from exit.
+ | mov L:RB, SAVE_L
+ | mov MULTRES, RDd
+ | mov LFUNC:KBASE, [BASE-16]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | mov L:RB->base, BASE
+ | mov qword [DISPATCH+DISPATCH_GL(jit_base)], 0
+ | set_vmstate INTERP
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | mov RCd, [PC]
+ | movzx RAd, RCH
+ | movzx OP, RCL
+ | add PC, 4
+ | shr RCd, 16
+ | cmp MULTRES, -17 // Static dispatch?
+ | je >5
+ | cmp OP, BC_FUNCF // Function header?
+ | jb >3
+ | cmp OP, BC_FUNCC+2 // Fast function?
+ | jae >4
+ |2:
+ | mov RCd, MULTRES // RC/RD holds nres+1.
+ |3:
+ | jmp aword [DISPATCH+OP*8]
+ |
+ |4: // Check frame below fast function.
+ | mov RC, [BASE-8]
+ | test RCd, FRAME_TYPE
+ | jnz <2 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | movzx RCd, byte [RC-3]
+ | neg RC
+ | mov LFUNC:KBASE, [BASE+RC*8-32]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <2
+ |
+ |5: // Dispatch to static entry of original ins replaced by BC_JLOOP.
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | mov TRACE:RA, [RA+RD*8]
+ | mov RCd, TRACE:RA->startins
+ | movzx RAd, RCH
+ | movzx OP, RCL
+ | shr RCd, 16
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC]
+ |
+ |9: // Rethrow error from the right C frame.
+ | mov CARG2d, RDd
+ | mov CARG1, L:RB
+ | neg CARG2d
+ | call extern lj_err_trace // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called by math.floor/math.ceil fast functions
+ |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
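+ |// The rounding uses the 2^52 bias trick, avoiding the SSE4.1 roundsd
+ |// instruction. A rough C model of the floor case (mode 0), assuming
+ |// round-to-nearest and |x| < 2^52:
+ |//   r = copysign((fabs(x) + 0x1p52) - 0x1p52, x);  // nearest integer
+ |//   if (x < r) r -= 1.0;                           // floor adjustment
+ |// ceil adds 1 when x > r instead, and trunc adjusts |x| before the sign
+ |// bit is merged back in.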
+ |.macro vm_round, name, mode, cond
+ |->name:
+ |->name .. _sse:
+ | sseconst_abs xmm2, RD
+ | sseconst_2p52 xmm3, RD
+ | movaps xmm1, xmm0
+ | andpd xmm1, xmm2 // |x|
+ | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ |.if mode == 2 // trunc(x)?
+ | movaps xmm0, xmm1
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | sseconst_1 xmm3, RD
+ | cmpsd xmm0, xmm1, 1 // |x| < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract -1.
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ |.else
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm3, RD
+ | .if mode == 1 // ceil(x)?
+ | cmpsd xmm0, xmm1, 6 // x > result?
+ | andpd xmm0, xmm3
+ | addsd xmm1, xmm0 // If yes, add 1.
+ | orpd xmm1, xmm2 // Merge sign bit back in (again).
+ | .else // floor(x)?
+ | cmpsd xmm0, xmm1, 1 // x < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract 1.
+ | .endif
+ |.endif
+ | movaps xmm0, xmm1
+ |1:
+ | ret
+ |.endmacro
+ |
+ | vm_round vm_floor, 0, 1
+ | vm_round vm_ceil, 1, JIT
+ | vm_round vm_trunc, 2, JIT
+ |
+ |// FP modulo x%y. Called by BC_MOD* and vm_arith.
+ |->vm_mod:
+ |// Args in xmm0/xmm1, return value in xmm0.
+ |// Caveat: xmm0-xmm5 and RC (eax) modified!
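+ |// Computes the Lua modulo x - floor(x/y)*y: the quotient is rounded down
+ |// with an inlined copy of the 2^52 trick from vm_floor above, then
+ |// multiplied back and subtracted. Roughly: return x - floor(x/y)*y;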
+ | movaps xmm5, xmm0
+ | divsd xmm0, xmm1
+ | sseconst_abs xmm2, RD
+ | sseconst_2p52 xmm3, RD
+ | movaps xmm4, xmm0
+ | andpd xmm4, xmm2 // |x/y|
+ | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52
+ | subsd xmm4, xmm3
+ | orpd xmm4, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm2, RD
+ | cmpsd xmm0, xmm4, 1 // x/y < result?
+ | andpd xmm0, xmm2
+ | subsd xmm4, xmm0 // If yes, subtract 1.0.
+ | movaps xmm0, xmm5
+ | mulsd xmm1, xmm4
+ | subsd xmm0, xmm1
+ | ret
+ |1:
+ | mulsd xmm1, xmm0
+ | movaps xmm0, xmm5
+ | subsd xmm0, xmm1
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// int lj_vm_cpuid(uint32_t f, uint32_t res[4])
+ |->vm_cpuid:
+ | mov eax, CARG1d
+ | .if X64WIN; push rsi; mov rsi, CARG2; .endif
+ | push rbx
+ | xor ecx, ecx
+ | cpuid
+ | mov [rsi], eax
+ | mov [rsi+4], ebx
+ | mov [rsi+8], ecx
+ | mov [rsi+12], edx
+ | pop rbx
+ | .if X64WIN; pop rsi; .endif
+ | ret
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_IDX, CARG2d
+ |.define NEXT_IDXa, CARG2
+ |.define NEXT_PTR, RC
+ |.define NEXT_PTRd, RCd
+ |.define NEXT_TMP, CARG3
+ |.define NEXT_ASIZE, CARG4d
+ |.macro NEXT_RES_IDXL, op2; lea edx, [NEXT_IDX+op2]; .endmacro
+ |.if X64WIN
+ |.define NEXT_RES_PTR, [rsp+aword*5]
+ |.macro NEXT_RES_IDX, op2; add NEXT_IDX, op2; .endmacro
+ |.else
+ |.define NEXT_RES_PTR, [rsp+aword*1]
+ |.macro NEXT_RES_IDX, op2; lea edx, [NEXT_IDX+op2]; .endmacro
+ |.endif
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in edx.
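+ |// Rough sketch of the traversal below (result copying omitted): scan the
+ |// array part from idx upwards, skipping nil slots; then continue with the
+ |// hash nodes at idx - asize, again skipping nil values; the returned next
+ |// index is biased so that hash entries follow the array part.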
+ |->vm_next:
+ |.if JIT
+ | mov NEXT_ASIZE, NEXT_TAB->asize
+ |1: // Traverse array part.
+ | cmp NEXT_IDX, NEXT_ASIZE; jae >5
+ | mov NEXT_TMP, NEXT_TAB->array
+ | mov NEXT_TMP, qword [NEXT_TMP+NEXT_IDX*8]
+ | cmp NEXT_TMP, LJ_TNIL; je >2
+ | lea NEXT_PTR, NEXT_RES_PTR
+ | mov qword [NEXT_PTR], NEXT_TMP
+ |.if DUALNUM
+ | setint NEXT_TMP, NEXT_IDXa
+ | mov qword [NEXT_PTR+qword*1], NEXT_TMP
+ |.else
+ | cvtsi2sd xmm0, NEXT_IDX
+ | movsd qword [NEXT_PTR+qword*1], xmm0
+ |.endif
+ | NEXT_RES_IDX 1
+ | ret
+ |2: // Skip holes in array part.
+ | add NEXT_IDX, 1
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub NEXT_IDX, NEXT_ASIZE
+ |6:
+ | cmp NEXT_IDX, NEXT_TAB->hmask; ja >9
+ | imul NEXT_PTRd, NEXT_IDX, #NODE
+ | add NODE:NEXT_PTR, NEXT_TAB->node
+ | cmp qword NODE:NEXT_PTR->val, LJ_TNIL; je >7
+ | NEXT_RES_IDXL NEXT_ASIZE+1
+ | ret
+ |7: // Skip holes in hash part.
+ | add NEXT_IDX, 1
+ | jmp <6
+ |
+ |9: // End of iteration. Set the key to nil (not the value).
+ | NEXT_RES_IDX NEXT_ASIZE
+ | lea NEXT_PTR, NEXT_RES_PTR
+ | mov qword [NEXT_PTR+qword*1], LJ_TNIL
+ | ret
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Assertions ---------------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->assert_bad_for_arg_type:
+#ifdef LUA_USE_ASSERT
+ | int3
+#endif
+ | int3
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in ah/al.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs_ // ebp/rbp already saved. ebp now holds global_State *.
+ | lea DISPATCH, [ebp+GG_G2DISP]
+ | mov CTSTATE, GL:ebp->ctype_state
+ | movzx eax, ax
+ | mov CTSTATE->cb.slot, eax
+ | mov CTSTATE->cb.gpr[0], CARG1
+ | mov CTSTATE->cb.gpr[1], CARG2
+ | mov CTSTATE->cb.gpr[2], CARG3
+ | mov CTSTATE->cb.gpr[3], CARG4
+ | movsd qword CTSTATE->cb.fpr[0], xmm0
+ | movsd qword CTSTATE->cb.fpr[1], xmm1
+ | movsd qword CTSTATE->cb.fpr[2], xmm2
+ | movsd qword CTSTATE->cb.fpr[3], xmm3
+ |.if X64WIN
+ | lea rax, [rsp+CFRAME_SIZE+4*8]
+ |.else
+ | lea rax, [rsp+CFRAME_SIZE]
+ | mov CTSTATE->cb.gpr[4], CARG5
+ | mov CTSTATE->cb.gpr[5], CARG6
+ | movsd qword CTSTATE->cb.fpr[4], xmm4
+ | movsd qword CTSTATE->cb.fpr[5], xmm5
+ | movsd qword CTSTATE->cb.fpr[6], xmm6
+ | movsd qword CTSTATE->cb.fpr[7], xmm7
+ |.endif
+ | mov CTSTATE->cb.stack, rax
+ | mov CARG2, rsp
+ | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok.
+ | mov CARG1, CTSTATE
+ | call extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // lua_State * returned in eax (RD).
+ | set_vmstate INTERP
+ | mov BASE, L:RD->base
+ | mov RD, L:RD->top
+ | sub RD, BASE
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | shr RD, 3
+ | add RD, 1
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | mov L:RA, SAVE_L
+ | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)]
+ | mov aword CTSTATE->L, L:RA
+ | mov L:RA->base, BASE
+ | mov L:RA->top, RB
+ | mov CARG1, CTSTATE
+ | mov CARG2, RC
+ | call extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | mov rax, CTSTATE->cb.gpr[0]
+ | movsd xmm0, qword CTSTATE->cb.fpr[0]
+ | jmp ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
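+ |// Sets up a plain rbp frame, reserves spadj bytes for outgoing stack
+ |// arguments copied from CCSTATE->stack, loads the GPR/FPR argument
+ |// registers from CCSTATE, calls the target and stores the return
+ |// registers back into CCSTATE for the C-side FFI call code to pick up.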
+ |.if FFI
+ | .type CCSTATE, CCallState, rbx
+ | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1
+ |
+ | // Readjust stack.
+ | mov eax, CCSTATE->spadj
+ | sub rsp, rax
+ |
+ | // Copy stack slots.
+ | movzx ecx, byte CCSTATE->nsp
+ | sub ecx, 8
+ | js >2
+ |1:
+ | mov rax, [CCSTATE+rcx+offsetof(CCallState, stack)]
+ | mov [rsp+rcx+CCALL_SPS_EXTRA*8], rax
+ | sub ecx, 8
+ | jns <1
+ |2:
+ |
+ | movzx eax, byte CCSTATE->nfpr
+ | mov CARG1, CCSTATE->gpr[0]
+ | mov CARG2, CCSTATE->gpr[1]
+ | mov CARG3, CCSTATE->gpr[2]
+ | mov CARG4, CCSTATE->gpr[3]
+ |.if not X64WIN
+ | mov CARG5, CCSTATE->gpr[4]
+ | mov CARG6, CCSTATE->gpr[5]
+ |.endif
+ | test eax, eax; jz >5
+ | movaps xmm0, CCSTATE->fpr[0]
+ | movaps xmm1, CCSTATE->fpr[1]
+ | movaps xmm2, CCSTATE->fpr[2]
+ | movaps xmm3, CCSTATE->fpr[3]
+ |.if not X64WIN
+ | cmp eax, 4; jbe >5
+ | movaps xmm4, CCSTATE->fpr[4]
+ | movaps xmm5, CCSTATE->fpr[5]
+ | movaps xmm6, CCSTATE->fpr[6]
+ | movaps xmm7, CCSTATE->fpr[7]
+ |.endif
+ |5:
+ |
+ | call aword CCSTATE->func
+ |
+ | mov CCSTATE->gpr[0], rax
+ | movaps CCSTATE->fpr[0], xmm0
+ |.if not X64WIN
+ | mov CCSTATE->gpr[1], rdx
+ | movaps CCSTATE->fpr[1], xmm1
+ |.endif
+ |
+ | mov rbx, [rbp-8]; leave; ret
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |// Note: aligning all instructions does not pay off.
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ |.macro jmp_comp, lt, ge, le, gt, target
+ ||switch (op) {
+ ||case BC_ISLT:
+ | lt target
+ ||break;
+ ||case BC_ISGE:
+ | ge target
+ ||break;
+ ||case BC_ISLE:
+ | le target
+ ||break;
+ ||case BC_ISGT:
+ | gt target
+ ||break;
+ ||default: break; /* Shut up GCC. */
+ ||}
+ |.endmacro
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1, RD = src2, JMP with RD = target
+ | ins_AD
+ | mov ITYPE, [BASE+RA*8]
+ | mov RB, [BASE+RD*8]
+ | mov RA, ITYPE
+ | mov RD, RB
+ | sar ITYPE, 47
+ | sar RB, 47
+ |.if DUALNUM
+ | cmp ITYPEd, LJ_TISNUM; jne >7
+ | cmp RBd, LJ_TISNUM; jne >8
+ | add PC, 4
+ | cmp RAd, RDd
+ | jmp_comp jge, jl, jg, jle, >9
+ |6:
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja ->vmeta_comp
+ | // RA is a number.
+ | cmp RBd, LJ_TISNUM; jb >1; jne ->vmeta_comp
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, RDd
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | ja ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ | cvtsi2sd xmm1, RAd
+ | movd xmm0, RD
+ | jmp >3
+ |.else
+ | cmp ITYPEd, LJ_TISNUM; jae ->vmeta_comp
+ | cmp RBd, LJ_TISNUM; jae ->vmeta_comp
+ |.endif
+ |1:
+ | movd xmm0, RD
+ |2:
+ | movd xmm1, RA
+ |3:
+ | add PC, 4
+ | ucomisd xmm0, xmm1
+ | // Unordered: all of ZF CF PF set, ordered: PF clear.
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
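+ | // E.g. 'if a < b then' typically compiles to ISGE, which must take the
+ | // branch (skip the body) when either operand is NaN, since a < b is false.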
+ |.if DUALNUM
+ | jmp_comp jbe, ja, jb, jae, <9
+ | jmp <6
+ |.else
+ | jmp_comp jbe, ja, jb, jae, >1
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1:
+ | ins_next
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | ins_AD // RA = src1, RD = src2, JMP with RD = target
+ | mov RB, [BASE+RD*8]
+ | mov ITYPE, [BASE+RA*8]
+ | add PC, 4
+ | mov RD, RB
+ | mov RA, ITYPE
+ | sar RB, 47
+ | sar ITYPE, 47
+ |.if DUALNUM
+ | cmp RBd, LJ_TISNUM; jne >7
+ | cmp ITYPEd, LJ_TISNUM; jne >8
+ | cmp RDd, RAd
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RD is not an integer.
+ | ja >5
+ | // RD is a number.
+ | movd xmm1, RD
+ | cmp ITYPEd, LJ_TISNUM; jb >1; jne >5
+ | // RD is a number, RA is an integer.
+ | cvtsi2sd xmm0, RAd
+ | jmp >2
+ |
+ |8: // RD is an integer, RA is not an integer.
+ | ja >5
+ | // RD is an integer, RA is a number.
+ | cvtsi2sd xmm1, RDd
+ | jmp >1
+ |
+ |.else
+ | cmp RBd, LJ_TISNUM; jae >5
+ | cmp ITYPEd, LJ_TISNUM; jae >5
+ | movd xmm1, RD
+ |.endif
+ |1:
+ | movd xmm0, RA
+ |2:
+ | ucomisd xmm0, xmm1
+ |4:
+ iseqne_fp:
+ if (vk) {
+ | jp >2 // Unordered means not equal.
+ | jne >2
+ } else {
+ | jp >2 // Unordered means not equal.
+ | je >1
+ }
+ iseqne_end:
+ if (vk) {
+ |1: // EQ: Branch to the target.
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2: // NE: Fallthrough to next instruction.
+ |.if not FFI
+ |3:
+ |.endif
+ } else {
+ |.if not FFI
+ |3:
+ |.endif
+ |2: // NE: Branch to the target.
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1: // EQ: Fallthrough to next instruction.
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ | jmp <9
+ } else {
+ | ins_next
+ }
+ |
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ |5: // Either or both types are not numbers.
+ |.if FFI
+ | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
+ | cmp ITYPEd, LJ_TCDATA; je ->vmeta_equal_cd
+ |.endif
+ | cmp RA, RD
+ | je <1 // Same GCobjs or pvalues?
+ | cmp RBd, ITYPEd
+ | jne <2 // Not the same type?
+ | cmp RBd, LJ_TISTABUD
+ | ja <2 // Different objects and not table/ud?
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | cleartp TAB:RA
+ | mov TAB:RB, TAB:RA->metatable
+ | test TAB:RB, TAB:RB
+ | jz <2 // No metatable?
+ | test byte TAB:RB->nomm, 1<<MM_eq
+ | jnz <2 // Or 'no __eq' flag set?
+ if (vk) {
+ | xor RBd, RBd // ne = 0
+ } else {
+ | mov RBd, 1 // ne = 1
+ }
+ | jmp ->vmeta_equal // Handle __eq metamethod.
+ } else {
+ |.if FFI
+ |3:
+ | cmp ITYPEd, LJ_TCDATA
+ if (LJ_DUALNUM && vk) {
+ | jne <9
+ } else {
+ | jne <2
+ }
+ | jmp ->vmeta_equal_cd
+ |.endif
+ }
+ break;
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | ins_AND // RA = src, RD = str const, JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | add PC, 4
+ | checkstr RB, >3
+ | cmp RB, [KBASE+RD*8]
+ iseqne_test:
+ if (vk) {
+ | jne >2
+ } else {
+ | je >1
+ }
+ goto iseqne_end;
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | ins_AD // RA = src, RD = num const, JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | add PC, 4
+ |.if DUALNUM
+ | checkint RB, >7
+ | mov RD, [KBASE+RD*8]
+ | checkint RD, >8
+ | cmp RBd, RDd
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja >3
+ | // RA is a number.
+ | mov RD, [KBASE+RD*8]
+ | checkint RD, >1
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, RDd
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is a number.
+ | cvtsi2sd xmm0, RBd
+ | movd xmm1, RD
+ | ucomisd xmm0, xmm1
+ | jmp >4
+ |1:
+ | movd xmm0, RD
+ |.else
+ | checknum RB, >3
+ |1:
+ | movsd xmm0, qword [KBASE+RD*8]
+ |.endif
+ |2:
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |4:
+ goto iseqne_fp;
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | sar RB, 47
+ | add PC, 4
+ | cmp RBd, RDd
+ if (!LJ_HASFFI) goto iseqne_test;
+ if (vk) {
+ | jne >3
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ |3:
+ | cmp RBd, LJ_TCDATA; jne <2
+ | jmp ->vmeta_equal_cd
+ } else {
+ | je >2
+ | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | ins_AD // RA = dst or unused, RD = src, JMP with RD = target
+ | mov ITYPE, [BASE+RD*8]
+ | add PC, 4
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov RB, ITYPE
+ }
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISTC) {
+ | jae >1
+ } else {
+ | jb >1
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov [BASE+RA*8], RB
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1: // Fallthrough to the next instruction.
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | ins_AD // RA = src, RD = -type
+ | mov RB, [BASE+RA*8]
+ | sar RB, 47
+ | add RBd, RDd
+ | jne ->vmeta_istype
+ | ins_next
+ break;
+ case BC_ISNUM:
+ | ins_AD // RA = src, RD = -(TISNUM-1)
+ | checknumtp [BASE+RA*8], ->vmeta_istype
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ | mov [BASE+RA*8], RB
+ | ins_next_
+ break;
+ case BC_NOT:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ | sar RB, 47
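+ | // The cmp/sbb pair maps truthy values (itype below LJ_TISTRUECOND,
+ | // unsigned) to 1 and nil/false to 2; shifting to the tag position and
+ | // inverting then yields the canonical false/true TValue.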
+ | mov RCd, 2
+ | cmp RB, LJ_TISTRUECOND
+ | sbb RCd, 0
+ | shl RC, 47
+ | not RC
+ | mov [BASE+RA*8], RC
+ | ins_next
+ break;
+ case BC_UNM:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ |.if DUALNUM
+ | checkint RB, >5
+ | neg RBd
+ | jo >4
+ | setint RB
+ |9:
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |4:
+ | mov64 RB, U64x(41e00000,00000000) // 2^31.
+ | jmp <9
+ |5:
+ | ja ->vmeta_unm
+ |.else
+ | checknum RB, ->vmeta_unm
+ |.endif
+ | mov64 RD, U64x(80000000,00000000)
+ | xor RB, RD
+ |.if DUALNUM
+ | jmp <9
+ |.else
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |.endif
+ break;
+ case BC_LEN:
+ | ins_AD // RA = dst, RD = src
+ | mov RD, [BASE+RD*8]
+ | checkstr RD, >2
+ |.if DUALNUM
+ | mov RDd, dword STR:RD->len
+ |1:
+ | setint RD
+ | mov [BASE+RA*8], RD
+ |.else
+ | xorps xmm0, xmm0
+ | cvtsi2sd xmm0, dword STR:RD->len
+ |1:
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ |2:
+ | cmp ITYPEd, LJ_TTAB; jne ->vmeta_len
+ | mov TAB:CARG1, TAB:RD
+#if LJ_52
+ | mov TAB:RB, TAB:RD->metatable
+ | cmp TAB:RB, 0
+ | jnz >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | mov RB, BASE // Save BASE.
+ | call extern lj_tab_len // (GCtab *t)
+ | // Length of table returned in eax (RD).
+ |.if DUALNUM
+ | // Nothing to do.
+ |.else
+ | cvtsi2sd xmm0, RDd
+ |.endif
+ | mov BASE, RB // Restore BASE.
+ | movzx RAd, PC_RA
+ | jmp <1
+#if LJ_52
+ |9: // Check for __len.
+ | test byte TAB:RB->nomm, 1<<MM_len
+ | jnz <3
+ | jmp ->vmeta_len // 'no __len' flag NOT set: check.
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
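+ |// vk classifies the operand layout of the arithmetic ops below:
+ |// 0 = VN (register, number constant), 1 = NV (constant, register),
+ |// 2 = VV (register, register); checks and loads are emitted per case.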
+ |.macro ins_arithpre, sseins, ssereg
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | checknumtp [BASE+RB*8], ->vmeta_arith_vn
+ | .if DUALNUM
+ | checknumtp [KBASE+RC*8], ->vmeta_arith_vn
+ | .endif
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [KBASE+RC*8]
+ || break;
+ ||case 1:
+ | checknumtp [BASE+RB*8], ->vmeta_arith_nv
+ | .if DUALNUM
+ | checknumtp [KBASE+RC*8], ->vmeta_arith_nv
+ | .endif
+ | movsd xmm0, qword [KBASE+RC*8]
+ | sseins ssereg, qword [BASE+RB*8]
+ || break;
+ ||default:
+ | checknumtp [BASE+RB*8], ->vmeta_arith_vv
+ | checknumtp [BASE+RC*8], ->vmeta_arith_vv
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [BASE+RC*8]
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | mov RB, [BASE+RB*8]
+ | mov RC, [KBASE+RC*8]
+ | checkint RB, ->vmeta_arith_vno
+ | checkint RC, ->vmeta_arith_vno
+ | intins RBd, RCd; jo ->vmeta_arith_vno
+ || break;
+ ||case 1:
+ | mov RB, [BASE+RB*8]
+ | mov RC, [KBASE+RC*8]
+ | checkint RB, ->vmeta_arith_nvo
+ | checkint RC, ->vmeta_arith_nvo
+ | intins RCd, RBd; jo ->vmeta_arith_nvo
+ || break;
+ ||default:
+ | mov RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checkint RB, ->vmeta_arith_vvo
+ | checkint RC, ->vmeta_arith_vvo
+ | intins RBd, RCd; jo ->vmeta_arith_vvo
+ || break;
+ ||}
+ ||if (vk == 1) {
+ | setint RC
+ | mov [BASE+RA*8], RC
+ ||} else {
+ | setint RB
+ | mov [BASE+RA*8], RB
+ ||}
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arithpost
+ | movsd qword [BASE+RA*8], xmm0
+ |.endmacro
+ |
+ |.macro ins_arith, sseins
+ | ins_arithpre sseins, xmm0
+ | ins_arithpost
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arith, intins, sseins
+ |.if DUALNUM
+ | ins_arithdn intins
+ |.else
+ | ins_arith, sseins
+ |.endif
+ |.endmacro
+
+ | // RA = dst, RB = src1 or num const, RC = src2 or num const
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith add, addsd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith sub, subsd
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith imul, mulsd
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith divsd
+ break;
+ case BC_MODVN:
+ | ins_arithpre movsd, xmm1
+ |->BC_MODVN_Z:
+ | call ->vm_mod
+ | ins_arithpost
+ | ins_next
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre movsd, xmm1
+ | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ break;
+ case BC_POW:
+ | ins_arithpre movsd, xmm1
+ | mov RB, BASE
+ | call extern pow
+ | movzx RAd, PC_RA
+ | mov BASE, RB
+ | ins_arithpost
+ | ins_next
+ break;
+
+ case BC_CAT:
+ | ins_ABC // RA = dst, RB = src_start, RC = src_end
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | lea CARG2, [BASE+RC*8]
+ | mov CARG3d, RCd
+ | sub CARG3d, RBd
+ |->BC_CAT_Z:
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jnz ->vmeta_binop
+ | movzx RBd, PC_RB // Copy result to Stk[RA] from Stk[RB].
+ | movzx RAd, PC_RA
+ | mov RC, [BASE+RB*8]
+ | mov [BASE+RA*8], RC
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov RD, [KBASE+RD*8]
+ | settp RD, LJ_TSTR
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | ins_AND // RA = dst, RD = cdata const (~)
+ | mov RD, [KBASE+RD*8]
+ | settp RD, LJ_TCDATA
+ | mov [BASE+RA*8], RD
+ | ins_next
+ |.endif
+ break;
+ case BC_KSHORT:
+ | ins_AD // RA = dst, RD = signed int16 literal
+ |.if DUALNUM
+ | movsx RDd, RDW
+ | setint RD
+ | mov [BASE+RA*8], RD
+ |.else
+ | movsx RDd, RDW // Sign-extend literal.
+ | cvtsi2sd xmm0, RDd
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ break;
+ case BC_KNUM:
+ | ins_AD // RA = dst, RD = num const
+ | movsd xmm0, qword [KBASE+RD*8]
+ | movsd qword [BASE+RA*8], xmm0
+ | ins_next
+ break;
+ case BC_KPRI:
+ | ins_AD // RA = dst, RD = primitive type (~)
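+ | // RD holds the one's complement of the tag, so shl 47 + not builds the
+ | // canonical tagged primitive: the (negative) type code in bits 47-63 with
+ | // all lower bits set (e.g. nil becomes all-ones, i.e. -1).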
+ | shl RD, 47
+ | not RD
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_KNIL:
+ | ins_AD // RA = dst_start, RD = dst_end
+ | lea RA, [BASE+RA*8+8]
+ | lea RD, [BASE+RD*8]
+ | mov RB, LJ_TNIL
+ | mov [RA-8], RB // Sets minimum 2 slots.
+ |1:
+ | mov [RA], RB
+ | add RA, 8
+ | cmp RA, RD
+ | jbe <1
+ | ins_next
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | ins_AD // RA = dst, RD = upvalue #
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RD*8+offsetof(GCfuncL, uvptr)]
+ | mov RB, UPVAL:RB->v
+ | mov RD, [RB]
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
+ | ins_AD // RA = upvalue #, RD = src
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | cmp byte UPVAL:RB->closed, 0
+ | mov RB, UPVAL:RB->v
+ | mov RA, [BASE+RD*8]
+ | mov [RB], RA
+ | jz >1
+ | // Check barrier for closed upvalue.
+ | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Upvalue is black. Check if new value is collectable and white.
+ | mov RD, RA
+ | sar RD, 47
+ | sub RDd, LJ_TISGCV
+ | cmp RDd, LJ_TNUMX - LJ_TISGCV // tvisgcv(v)
+ | jbe <1
+ | cleartp GCOBJ:RA
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v)
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if not X64WIN
+ | mov CARG2, RB
+ | mov RB, BASE // Save BASE.
+ |.else
+ | xchg CARG2, RB // Save BASE (CARG2 == BASE).
+ |.endif
+ | lea GL:CARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ | ins_AND // RA = upvalue #, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | mov STR:RA, [KBASE+RD*8]
+ | mov RD, UPVAL:RB->v
+ | settp STR:ITYPE, STR:RA, LJ_TSTR
+ | mov [RD], STR:ITYPE
+ | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str)
+ | jz <1
+ | cmp byte UPVAL:RB->closed, 0
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov RB, BASE // Save BASE (CARG2 == BASE).
+ | mov CARG2, RD
+ | lea GL:CARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+ case BC_USETN:
+ | ins_AD // RA = upvalue #, RD = num const
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | movsd xmm0, qword [KBASE+RD*8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | mov RA, UPVAL:RB->v
+ | movsd qword [RA], xmm0
+ | ins_next
+ break;
+ case BC_USETP:
+ | ins_AD // RA = upvalue #, RD = primitive type (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | shl RD, 47
+ | not RD
+ | mov RA, UPVAL:RB->v
+ | mov [RA], RD
+ | ins_next
+ break;
+ case BC_UCLO:
+ | ins_AD // RA = level, RD = target
+ | branchPC RD // Do this first to free RD.
+ | mov L:RB, SAVE_L
+ | cmp aword L:RB->openupval, 0
+ | je >1
+ | mov L:RB->base, BASE
+ | lea CARG2, [BASE+RA*8] // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB // Caveat: CARG1 == RA
+ | call extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | mov BASE, L:RB->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | ins_AND // RA = dst, RD = proto const (~) (holding function prototype)
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG3, [BASE-16]
+ | cleartp CARG3
+ | mov CARG2, [KBASE+RD*8] // Fetch GCproto *.
+ | mov CARG1, L:RB
+ | mov SAVE_PC, PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call extern lj_func_newL_gc
+ | // GCfuncL * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA
+ | settp LFUNC:RC, LJ_TFUNC
+ | mov [BASE+RA*8], LFUNC:RC
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ | ins_AD // RA = dst, RD = hbits|asize
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov SAVE_PC, PC
+ | jae >5
+ |1:
+ | mov CARG3d, RDd
+ | and RDd, 0x7ff
+ | shr CARG3d, 11
+ | cmp RDd, 0x7ff
+ | je >3
+ |2:
+ | mov L:CARG1, L:RB
+ | mov CARG2d, RDd
+ | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA
+ | settp TAB:RC, LJ_TTAB
+ | mov [BASE+RA*8], TAB:RC
+ | ins_next
+ |3: // Turn 0x7ff into 0x801.
+ | mov RDd, 0x801
+ | jmp <2
+ |5:
+ | mov L:CARG1, L:RB
+ | call extern lj_gc_step_fixtop // (lua_State *L)
+ | movzx RDd, PC_RD
+ | jmp <1
+ break;
+ case BC_TDUP:
+ | ins_AND // RA = dst, RD = table const (~) (holding template table)
+ | mov L:RB, SAVE_L
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | mov SAVE_PC, PC
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov L:RB->base, BASE
+ | jae >3
+ |2:
+ | mov TAB:CARG2, [KBASE+RD*8] // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB // Caveat: CARG1 == RA
+ | call extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA
+ | settp TAB:RC, LJ_TTAB
+ | mov [BASE+RA*8], TAB:RC
+ | ins_next
+ |3:
+ | mov L:CARG1, L:RB
+ | call extern lj_gc_step_fixtop // (lua_State *L)
+ | movzx RDd, PC_RD // Need to reload RD.
+ | not RD
+ | jmp <2
+ break;
+
+ case BC_GGET:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*8]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_GSET:
+ | ins_AND // RA = src, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*8]
+ | jmp ->BC_TSETS_Z
+ break;
+
+ case BC_TGETV:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tgetv
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movd xmm0, RC
+ | cvttsd2si RCd, xmm0
+ | cvtsi2sd xmm1, RCd
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tgetv // Not in array part? Use fallback.
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ | mov ITYPE, [RC]
+ | cmp ITYPE, LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ |1:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
+ | jmp <1
+ |
+ |5: // String key?
+ | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tgetv
+ | cleartp STR:RC
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | ins_ABC // RA = dst, RB = table, RC = str const (~)
+ | mov TAB:RB, [BASE+RB*8]
+ | not RC
+ | mov STR:RC, [KBASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tgets
+ |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *
+ | mov TMPRd, TAB:RB->hmask
+ | and TMPRd, STR:RC->sid
+ | imul TMPRd, #NODE
+ | add NODE:TMPR, TAB:RB->node
+ | settp ITYPE, STR:RC, LJ_TSTR
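+ | // Hash part lookup below, roughly (#NODE is sizeof(Node)):
+ | //   n = &t->node[str->sid & t->hmask];
+ | //   do { if (n->key == tagged(str)) use(&n->val); } while ((n = n->next));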
+ |1:
+ | cmp NODE:TMPR->key, ITYPE
+ | jne >4
+ | // Get node value.
+ | mov ITYPE, NODE:TMPR->val
+ | cmp ITYPE, LJ_TNIL
+ | je >5 // Key found, but nil value?
+ |2:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | mov NODE:TMPR, NODE:TMPR->next
+ | test NODE:TMPR, NODE:TMPR
+ | jnz <1
+ | // End of hash chain: key not found, nil result.
+ | mov ITYPE, LJ_TNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <2 // No metatable: done.
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jnz <2 // 'no __index' flag set: done.
+ | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
+ break;
+ case BC_TGETB:
+ | ins_ABC // RA = dst, RB = table, RC = byte literal
+ | mov TAB:RB, [BASE+RB*8]
+ | checktab TAB:RB, ->vmeta_tgetb
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tgetb
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ | mov ITYPE, [RC]
+ | cmp ITYPE, LJ_TNIL
+ | je >2
+ |1:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
+ | jmp <1
+ break;
+ case BC_TGETR:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | cleartp TAB:RB
+ |.if DUALNUM
+ | mov RCd, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RCd, qword [BASE+RC*8]
+ |.endif
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tgetr // Not in array part? Use fallback.
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ |->BC_TGETR_Z:
+ | mov ITYPE, [RC]
+ |->BC_TGETR2_Z:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tsetv
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movd xmm0, RC
+ | cvttsd2si RCd, xmm0
+ | cvtsi2sd xmm1, RCd
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tsetv
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | cmp aword [RC], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ | mov RB, [BASE+RA*8]
+ | mov [RC], RB
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
+ | jmp <1
+ |
+ |5: // String key?
+ | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tsetv
+ | cleartp STR:RC
+ | jmp ->BC_TSETS_Z
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+ case BC_TSETS:
+ | ins_ABC // RA = src, RB = table, RC = str const (~)
+ | mov TAB:RB, [BASE+RB*8]
+ | not RC
+ | mov STR:RC, [KBASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tsets
+ |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *
+ | mov TMPRd, TAB:RB->hmask
+ | and TMPRd, STR:RC->sid
+ | imul TMPRd, #NODE
+ | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
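+ | // (The table may itself be in use as a metatable and the stored key could
+ | // be a metamethod name, so its negative metamethod cache must be reset.)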
+ | add NODE:TMPR, TAB:RB->node
+ | settp ITYPE, STR:RC, LJ_TSTR
+ |1:
+ | cmp NODE:TMPR->key, ITYPE
+ | jne >5
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp aword [TMPR], LJ_TNIL
+ | je >4 // Previous value is nil?
+ |2:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |3: // Set node value.
+ | mov ITYPE, [BASE+RA*8]
+ | mov [TMPR], ITYPE
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | mov TAB:ITYPE, TAB:RB->metatable
+ | test TAB:ITYPE, TAB:ITYPE
+ | jz <2
+ | test byte TAB:ITYPE->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ | jmp <2
+ |
+ |5: // Follow hash chain.
+ | mov NODE:TMPR, NODE:TMPR->next
+ | test NODE:TMPR, NODE:TMPR
+ | jnz <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz >6 // No metatable: continue.
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mov TMP1, ITYPE
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | lea CARG3, TMP1
+ | mov CARG2, TAB:RB
+ | mov SAVE_PC, PC
+ | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Handles write barrier for the new key. TValue * returned in eax (RC).
+ | mov L:CARG1, SAVE_L
+ | mov BASE, L:CARG1->base
+ | mov TMPR, rax
+ | movzx RAd, PC_RA
+ | jmp <2 // Must check write barrier for value.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, ITYPE
+ | jmp <3
+ break;
+ case BC_TSETB:
+ | ins_ABC // RA = src, RB = table, RC = byte literal
+ | mov TAB:RB, [BASE+RB*8]
+ | checktab TAB:RB, ->vmeta_tsetb
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tsetb
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | cmp aword [RC], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ | mov ITYPE, [BASE+RA*8]
+ | mov [RC], ITYPE
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
+ | jmp <1
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+ case BC_TSETR:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | cleartp TAB:RB
+ |.if DUALNUM
+ | mov RC, [BASE+RC*8]
+ |.else
+ | cvttsd2si RCd, qword [BASE+RC*8]
+ |.endif
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tsetr
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Set array slot.
+ |->BC_TSETR_Z:
+ | mov ITYPE, [BASE+RA*8]
+ | mov [RC], ITYPE
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+
+ case BC_TSETM:
+ | ins_AD // RA = base (table at base-1), RD = num const (start index)
+ |1:
+ | mov TMPRd, dword [KBASE+RD*8] // Integer constant is in lo-word.
+ | lea RA, [BASE+RA*8]
+ | mov TAB:RB, [RA-8] // Guaranteed to be a table.
+ | cleartp TAB:RB
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | mov RDd, MULTRES
+ | sub RDd, 1
+ | jz >4 // Nothing to copy?
+ | add RDd, TMPRd // Compute needed size.
+ | cmp RDd, TAB:RB->asize
+ | ja >5 // Doesn't fit into array part?
+ | sub RDd, TMPRd
+ | shl TMPRd, 3
+ | add TMPR, TAB:RB->array
+ |3: // Copy result slots to table.
+ | mov RB, [RA]
+ | add RA, 8
+ | mov [TMPR], RB
+ | add TMPR, 8
+ | sub RDd, 1
+ | jnz <3
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, TAB:RB
+ | mov CARG3d, RDd
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA // Restore RA.
+ | movzx RDd, PC_RD // Restore RD.
+ | jmp <1 // Retry.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:RB, RD
+ | jmp <2
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs
+ if (op == BC_CALLM) {
+ | add NARGS:RDd, MULTRES
+ }
+ | mov LFUNC:RB, [BASE+RA*8]
+ | checkfunc LFUNC:RB, ->vmeta_call_ra
+ | lea BASE, [BASE+RA*8+16]
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | ins_AD // RA = base, RD = extra_nargs
+ | add NARGS:RDd, MULTRES
+ | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op.
+ break;
+ case BC_CALLT:
+ | ins_AD // RA = base, RD = nargs+1
+ | lea RA, [BASE+RA*8+16]
+ | mov KBASE, BASE // Use KBASE for move + vmeta_call hint.
+ | mov LFUNC:RB, [RA-16]
+ | checktp_nc LFUNC:RB, LJ_TFUNC, ->vmeta_call
+ |->BC_CALLT_Z:
+ | mov PC, [BASE-8]
+ | test PCd, FRAME_TYPE
+ | jnz >7
+ |1:
+ | mov [BASE-16], LFUNC:RB // Copy func+tag down, reloaded below.
+ | mov MULTRES, NARGS:RDd
+ | sub NARGS:RDd, 1
+ | jz >3
+ |2: // Move args down.
+ | mov RB, [RA]
+ | add RA, 8
+ | mov [KBASE], RB
+ | add KBASE, 8
+ | sub NARGS:RDd, 1
+ | jnz <2
+ |
+ | mov LFUNC:RB, [BASE-16]
+ |3:
+ | cleartp LFUNC:RB
+ | mov NARGS:RDd, MULTRES
+ | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function?
+ | ja >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function.
+ | test PCd, FRAME_TYPE // Lua frame below?
+ | jnz <4
+ | movzx RAd, PC_RA
+ | neg RA
+ | mov LFUNC:KBASE, [BASE+RA*8-32] // Need to prepare KBASE.
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <4
+ |
+ |7: // Tailcall from a vararg function.
+ | sub PC, FRAME_VARG
+ | test PCd, FRAME_TYPEP
+ | jnz >8 // Vararg frame below?
+ | sub BASE, PC // Need to relocate BASE/KBASE down.
+ | mov KBASE, BASE
+ | mov PC, [BASE-8]
+ | jmp <1
+ |8:
+ | add PCd, FRAME_VARG
+ | jmp <1
+ break;
+
+ case BC_ITERC:
+ | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1)
+ | lea RA, [BASE+RA*8+16] // fb = base+2
+ | mov RB, [RA-32] // Copy state. fb[0] = fb[-4].
+ | mov RC, [RA-24] // Copy control var. fb[1] = fb[-3].
+ | mov [RA], RB
+ | mov [RA+8], RC
+ | mov LFUNC:RB, [RA-40] // Copy callable. fb[-2] = fb[-5]
+ | mov [RA-16], LFUNC:RB
+ | mov NARGS:RDd, 2+1 // Handle like a regular 2-arg call.
+ | checkfunc LFUNC:RB, ->vmeta_call
+ | mov BASE, RA
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ |->vm_IITERN:
+ | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+ | mov TAB:RB, [BASE+RA*8-16]
+ | cleartp TAB:RB
+ | mov RCd, [BASE+RA*8-8] // Get index from control var.
+ | mov TMPRd, TAB:RB->asize
+ | add PC, 4
+ | mov ITYPE, TAB:RB->array
+ |1: // Traverse array part.
+ | cmp RCd, TMPRd; jae >5 // Index points after array part?
+ | cmp aword [ITYPE+RC*8], LJ_TNIL; je >4
+ |.if not DUALNUM
+ | cvtsi2sd xmm0, RCd
+ |.endif
+ | // Copy array slot to returned value.
+ | mov RB, [ITYPE+RC*8]
+ | mov [BASE+RA*8+8], RB
+ | // Return array index as a numeric key.
+ |.if DUALNUM
+ | setint ITYPE, RC
+ | mov [BASE+RA*8], ITYPE
+ |.else
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | add RCd, 1
+ | mov [BASE+RA*8-8], RCd // Update control var.
+ |2:
+ | movzx RDd, PC_RD // Get target from ITERL.
+ | branchPC RD
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | add RCd, 1
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub RCd, TMPRd
+ |6:
+ | cmp RCd, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1.
+ | imul ITYPEd, RCd, #NODE
+ | add NODE:ITYPE, TAB:RB->node
+ | cmp aword NODE:ITYPE->val, LJ_TNIL; je >7
+ | lea TMPRd, [RCd+TMPRd+1]
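+ | // New control var = asize + hash index + 1, so iteration resumes past this slot.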
+ | // Copy key and value from hash slot.
+ | mov RB, NODE:ITYPE->key
+ | mov RC, NODE:ITYPE->val
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+8], RC
+ | mov [BASE+RA*8-8], TMPRd
+ | jmp <2
+ |
+ |7: // Skip holes in hash part.
+ | add RCd, 1
+ | jmp <6
+ break;
+
+ case BC_ISNEXT:
+ | ins_AD // RA = base, RD = target (points to ITERN)
+ | mov CFUNC:RB, [BASE+RA*8-24]
+ | checkfunc CFUNC:RB, >5
+ | checktptp [BASE+RA*8-16], LJ_TTAB, >5
+ | cmp aword [BASE+RA*8-8], LJ_TNIL; jne >5
+ | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5
+ | branchPC RD
+ | mov64 TMPR, ((uint64_t)LJ_KEYINDEX << 32)
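+ | // LJ_KEYINDEX in the upper half marks the control slot as holding a raw
+ | // iteration index (lower half starts at 0) for the specialized ITERN path.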
+ | mov [BASE+RA*8-8], TMPR // Initialize control var.
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov PC_OP, BC_JMP
+ | branchPC RD
+ |.if JIT
+ | cmp byte [PC], BC_ITERN
+ | jne >6
+ |.endif
+ | mov byte [PC], BC_ITERC
+ | jmp <1
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | movzx RCd, word [PC+2]
+ | mov TRACE:RA, [RA+RC*8]
+ | mov eax, TRACE:RA->startins
+ | mov al, BC_ITERC
+ | mov dword [PC], eax
+ | jmp <1
+ |.endif
+ break;
+
+ case BC_VARG:
+ | ins_ABC // RA = base, RB = nresults+1, RC = numparams
+ | lea TMPR, [BASE+RC*8+(16+FRAME_VARG)]
+ | lea RA, [BASE+RA*8]
+ | sub TMPR, [BASE-8]
+ | // Note: TMPR may now be even _above_ BASE if nargs was < numparams.
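+ | // [BASE-8] holds the vararg frame delta (+FRAME_VARG), so TMPR-16 now
+ | // points at the first vararg slot; the +16 bias matches the two frame
+ | // slots below BASE and lets the copy loops compare TMPR against BASE.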
+ | test RB, RB
+ | jz >5 // Copy all varargs?
+ | lea RB, [RA+RB*8-8]
+ | cmp TMPR, BASE // No vararg slots?
+ | jnb >2
+ |1: // Copy vararg slots to destination slots.
+ | mov RC, [TMPR-16]
+ | add TMPR, 8
+ | mov [RA], RC
+ | add RA, 8
+ | cmp RA, RB // All destination slots filled?
+ | jnb >3
+ | cmp TMPR, BASE // No more vararg slots?
+ | jb <1
+ |2: // Fill up remainder with nil.
+ | mov aword [RA], LJ_TNIL
+ | add RA, 8
+ | cmp RA, RB
+ | jb <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | mov MULTRES, 1 // MULTRES = 0+1
+ | mov RC, BASE
+ | sub RC, TMPR
+ | jbe <3 // No vararg slots?
+ | mov RBd, RCd
+ | shr RBd, 3
+ | add RBd, 1
+ | mov MULTRES, RBd // MULTRES = #varargs+1
+ | mov L:RB, SAVE_L
+ | add RC, RA
+ | cmp RC, L:RB->maxstack
+ | ja >7 // Need to grow stack?
+ |6: // Copy all vararg slots.
+ | mov RC, [TMPR-16]
+ | add TMPR, 8
+ | mov [RA], RC
+ | add RA, 8
+ | cmp TMPR, BASE // No more vararg slots?
+ | jb <6
+ | jmp <3
+ |
+ |7: // Grow stack for varargs.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RA
+ | mov SAVE_PC, PC
+ | sub TMPR, BASE // Need delta, because BASE may change.
+ | mov TMP1hi, TMPRd
+ | mov CARG2d, MULTRES
+ | sub CARG2d, 1
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | movsxd TMPR, TMP1hi
+ | mov RA, L:RB->top
+ | add TMPR, BASE
+ | jmp <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | ins_AD // RA = results, RD = extra_nresults
+ | add RDd, MULTRES // MULTRES >=1, so RD >=1.
+ | // Fall through. Assumes BC_RET follows and ins_AD is a no-op.
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ | ins_AD // RA = results, RD = nresults+1
+ if (op != BC_RET0) {
+ | shl RAd, 3
+ }
+ |1:
+ | mov PC, [BASE-8]
+ | mov MULTRES, RDd // Save nresults+1.
+ | test PCd, FRAME_TYPE // Check frame type marker.
+ | jnz >7 // Not returning to a fixarg Lua func?
+ switch (op) {
+ case BC_RET:
+ |->BC_RET_Z:
+ | mov KBASE, BASE // Use KBASE for result move.
+ | sub RDd, 1
+ | jz >3
+ |2: // Move results down.
+ | mov RB, [KBASE+RA]
+ | mov [KBASE-16], RB
+ | add KBASE, 8
+ | sub RDd, 1
+ | jnz <2
+ |3:
+ | mov RDd, MULTRES // Note: MULTRES may be >255.
+ | movzx RBd, PC_RB // So cannot compare with RDL!
+ |5:
+ | cmp RBd, RDd // More results expected?
+ | ja >6
+ break;
+ case BC_RET1:
+ | mov RB, [BASE+RA]
+ | mov [BASE-16], RB
+ /* fallthrough */
+ case BC_RET0:
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ default:
+ break;
+ }
+ | movzx RAd, PC_RA
+ | neg RA
+ | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8
+ | mov LFUNC:KBASE, [BASE-16]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ if (op == BC_RET) {
+ | mov aword [KBASE-16], LJ_TNIL // Note: relies on shifted base.
+ | add KBASE, 8
+ } else {
+ | mov aword [BASE+RD*8-24], LJ_TNIL
+ }
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | lea RB, [PC-FRAME_VARG]
+ | test RBd, FRAME_TYPEP
+ | jnz ->vm_return
+ | // Return from vararg function: relocate BASE down and RA up.
+ | sub BASE, RB
+ if (op != BC_RET0) {
+ | add RA, RB
+ }
+ | jmp <1
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]
+ |.define FOR_STOP, [RA+8]
+ |.define FOR_STEP, [RA+16]
+ |.define FOR_EXT, [RA+24]
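+ | // A numeric for loop keeps four consecutive slots at RA: the internal
+ | // index, the stop value, the step and the externally visible copy of the
+ | // index (the loop variable seen by the body).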
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | ins_AJ // RA = base, RD = target (after end of loop or start of loop)
+ | lea RA, [BASE+RA*8]
+ if (LJ_DUALNUM) {
+ | mov RB, FOR_IDX
+ | checkint RB, >9
+ | mov TMPR, FOR_STOP
+ if (!vk) {
+ | checkint TMPR, ->vmeta_for
+ | mov ITYPE, FOR_STEP
+ | test ITYPEd, ITYPEd; js >5
+ | sar ITYPE, 47;
+ | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for
+ } else {
+#ifdef LUA_USE_ASSERT
+ | checkinttp FOR_STOP, ->assert_bad_for_arg_type
+ | checkinttp FOR_STEP, ->assert_bad_for_arg_type
+#endif
+ | mov ITYPE, FOR_STEP
+ | test ITYPEd, ITYPEd; js >5
+ | add RBd, ITYPEd; jo >1
+ | setint RB
+ | mov FOR_IDX, RB
+ }
+ | cmp RBd, TMPRd
+ | mov FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jle >7
+ |1:
+ |6:
+ | branchPC RD
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RDd, PC_RD
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ } else if (op == BC_IFORL) {
+ | jg >7
+ |6:
+ | branchPC RD
+ |1:
+ } else {
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ }
+ |7:
+ | ins_next
+ |
+ |5: // Invert check for negative step.
+ if (!vk) {
+ | sar ITYPE, 47;
+ | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for
+ } else {
+ | add RBd, ITYPEd; jo <1
+ | setint RB
+ | mov FOR_IDX, RB
+ }
+ | cmp RBd, TMPRd
+ | mov FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jge <7
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RDd, PC_RD
+ | jge =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | jl <7
+ } else {
+ | jge =>BC_JLOOP
+ }
+ | jmp <6
+ |9: // Fallback to FP variant.
+ if (!vk) {
+ | jae ->vmeta_for
+ }
+ } else if (!vk) {
+ | checknumtp FOR_IDX, ->vmeta_for
+ }
+ if (!vk) {
+ | checknumtp FOR_STOP, ->vmeta_for
+ } else {
+#ifdef LUA_USE_ASSERT
+ | checknumtp FOR_STOP, ->assert_bad_for_arg_type
+ | checknumtp FOR_STEP, ->assert_bad_for_arg_type
+#endif
+ }
+ | mov RB, FOR_STEP
+ if (!vk) {
+ | checknum RB, ->vmeta_for
+ }
+ | movsd xmm0, qword FOR_IDX
+ | movsd xmm1, qword FOR_STOP
+ if (vk) {
+ | addsd xmm0, qword FOR_STEP
+ | movsd qword FOR_IDX, xmm0
+ | test RB, RB; js >3
+ } else {
+ | jl >3
+ }
+ | ucomisd xmm1, xmm0
+ |1:
+ | movsd qword FOR_EXT, xmm0
+ if (op == BC_FORI) {
+ |.if DUALNUM
+ | jnb <7
+ |.else
+ | jnb >2
+ | branchPC RD
+ |.endif
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RDd, PC_RD
+ | jnb =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ |.if DUALNUM
+ | jb <7
+ |.else
+ | jb >2
+ | branchPC RD
+ |.endif
+ } else {
+ | jnb =>BC_JLOOP
+ }
+ |.if DUALNUM
+ | jmp <6
+ |.else
+ |2:
+ | ins_next
+ |.endif
+ |
+ |3: // Invert comparison if step is negative.
+ | ucomisd xmm0, xmm1
+ | jmp <1
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | ins_AJ // RA = base, RD = target
+ | lea RA, [BASE+RA*8]
+ | mov RB, [RA]
+ | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | mov [RA-8], RB
+ | jmp =>BC_JLOOP
+ } else {
+ | branchPC RD // Otherwise save control var + branch.
+ | mov [RA-8], RB
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | // Note: RA/RD are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op.
+ break;
+
+ case BC_ILOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | ins_AD // RA = base (ignored), RD = traceno
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | mov TRACE:RD, [RA+RD*8]
+ | mov RD, TRACE:RD->mcode
+ | mov L:RB, SAVE_L
+ | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE
+ | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB
+ | // Save additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | mov CSAVE_4, r12
+ | mov CSAVE_3, r13
+ | mov CSAVE_2, r14
+ | mov CSAVE_1, r15
+ | mov RA, rsp
+ | sub rsp, 10*16+4*8
+ | movdqa [RA-1*16], xmm6
+ | movdqa [RA-2*16], xmm7
+ | movdqa [RA-3*16], xmm8
+ | movdqa [RA-4*16], xmm9
+ | movdqa [RA-5*16], xmm10
+ | movdqa [RA-6*16], xmm11
+ | movdqa [RA-7*16], xmm12
+ | movdqa [RA-8*16], xmm13
+ | movdqa [RA-9*16], xmm14
+ | movdqa [RA-10*16], xmm15
+ |.else
+ | sub rsp, 16
+ | mov [rsp+16], r12
+ | mov [rsp+8], r13
+ |.endif
+ | jmp RD
+ |.endif
+ break;
+
+ case BC_JMP:
+ | ins_AJ // RA = unused, RD = target
+ | branchPC RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
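+
+ /* A minimal sketch of the pattern the FUNC* cases below follow (plain C,
+ ** hypothetical helper names, not the VM's actual routines):
+ **
+ **   TValue *top = base + framesize;
+ **   if (top > L->maxstack) grow_stack(L);   // Check BEFORE writing slots.
+ **   ...only then clear/fill the new slots and dispatch...
+ */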
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall RBd
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | mov L:RB, SAVE_L
+ | lea RA, [BASE+RA*8] // Top of frame.
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_f
+ | movzx RAd, byte [PC-4+PC2PROTO(numparams)]
+ | cmp NARGS:RDd, RAd // Check for missing parameters.
+ | jbe >3
+ |2:
+ if (op == BC_JFUNCF) {
+ | movzx RDd, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov aword [BASE+NARGS:RD*8-8], LJ_TNIL
+ | add NARGS:RDd, 1
+ | cmp NARGS:RDd, RAd
+ | jbe <3
+ | jmp <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | int3 // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | lea RBd, [NARGS:RD*8+FRAME_VARG+8]
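+ | // RB = frame delta (args plus the two frame slots) tagged with FRAME_VARG;
+ | // stored as the frame link below so returns and BC_VARG can find the old base.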
+ | lea RD, [BASE+NARGS:RD*8+8]
+ | mov LFUNC:KBASE, [BASE-16]
+ | mov [RD-8], RB // Store delta + FRAME_VARG.
+ | mov [RD-16], LFUNC:KBASE // Store copy of LFUNC.
+ | mov L:RB, SAVE_L
+ | lea RA, [RD+RA*8]
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_v // Need to grow stack.
+ | mov RA, BASE
+ | mov BASE, RD
+ | movzx RBd, byte [PC-4+PC2PROTO(numparams)]
+ | test RBd, RBd
+ | jz >2
+ | add RA, 8
+ |1: // Copy fixarg slots up to new frame.
+ | add RA, 8
+ | cmp RA, BASE
+ | jnb >3 // Less args than parameters?
+ | mov KBASE, [RA-16]
+ | mov [RD], KBASE
+ | add RD, 8
+ | mov aword [RA-16], LJ_TNIL // Clear old fixarg slot (help the GC).
+ | sub RBd, 1
+ | jnz <1
+ |2:
+ if (op == BC_JFUNCV) {
+ | movzx RDd, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov aword [RD], LJ_TNIL
+ | add RD, 8
+ | sub RBd, 1
+ | jnz <3
+ | jmp <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1
+ | mov CFUNC:RB, [BASE-16]
+ | cleartp CFUNC:RB
+ | mov KBASE, CFUNC:RB->f
+ | mov L:RB, SAVE_L
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->base, BASE
+ | lea RA, [RD+8*LUA_MINSTACK]
+ | cmp RA, L:RB->maxstack
+ | mov L:RB->top, RD
+ if (op == BC_FUNCC) {
+ | mov CARG1, L:RB // Caveat: CARG1 may be RA.
+ } else {
+ | mov CARG2, KBASE
+ | mov CARG1, L:RB // Caveat: CARG1 may be RA.
+ }
+ | ja ->vm_growstack_c // Need to grow stack.
+ | set_vmstate C
+ if (op == BC_FUNCC) {
+ | call KBASE // (lua_State *L)
+ } else {
+ | // (lua_State *L, lua_CFunction f)
+ | call aword [DISPATCH+DISPATCH_GL(wrapf)]
+ }
+ | // nresults returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | lea RA, [BASE+RD*8]
+ | neg RA
+ | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8
+ | mov PC, [BASE-8] // Fetch PC of caller.
+ | jmp ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ dasm_growpc(Dst, BC__MAX);
+ build_subroutines(ctx);
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 0x10\n"
+ "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
+ "\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
+ "\t.align 8\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#if LJ_NO_UNWIND
+ "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */
+ "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */
+#endif
+ "\t.align 8\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.align 8\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+#if LJ_TARGET_SOLARIS
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 0x10\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
+ "\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
+ "\t.align 8\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+ "\t.align 8\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 0x10\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
+ "\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
+ "\t.align 8\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.align 8\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+#if !LJ_NO_UNWIND
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-8\n"
+ "\t.byte 0x10\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n"
+ "\t.byte 0x80+0x10\n\t.byte 0x1\n"
+ "\t.align 3\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+ "\t.align 3\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-8\n"
+ "\t.byte 0x10\n"
+ "\t.byte 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n"
+ "\t.byte 0x80+0x10\n\t.byte 0x1\n"
+ "\t.align 3\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.align 3\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+ fprintf(ctx->fp, ".subsections_via_symbols\n");
+ }
+ break;
+#endif
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
+