*
* [ prev sp ] <-------------
* [ tail_call_info ] 8 |
- * [ nv gpr save area ] 6*8 |
+ * [ nv gpr save area ] 6*8 + (12*8) |
* [ local_tmp_var ] 24 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
* [ frame header ] 32/112 |
* sp (r1) ---> [ stack pointer ] --------------
+ *
+ * The additional (12*8) bytes in the 'nv gpr save area' are present
+ * only at an exception boundary.
*/
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS 24
+/*
+ * Space for the additional non-volatile registers (r14-r25) saved
+ * at the exception boundary.
+ */
+#define BPF_PPC_EXC_STACK_SAVE (12*8)
+
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
BPF_PPC_STACK_LOCALS + \
BPF_PPC_STACK_SAVE + \
BPF_PPC_TAILCALL)
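+/*
+ * e.g. with the ELFv2 ABI (the 32 byte frame header in the layout
+ * above): 32 + 24 + 6*8 + 8 = 112 bytes, a multiple of 16.
+ */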
+/*
+ * Same as BPF_PPC_STACKFRAME, plus the save area for the additional
+ * non-volatile registers saved at the exception boundary. Since
+ * (12*8) = 96 is a multiple of 16, this remains quadword aligned.
+ */
+#define BPF_PPC_EXC_STACKFRAME (BPF_PPC_STACKFRAME + BPF_PPC_EXC_STACK_SAVE)
+
/* BPF register usage */
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
* - we call other functions (kernel helpers), or
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
+ *
+ * bpf_throw() leads to an exception callback being invoked from a BPF
+ * (sub)program. Such a (sub)program is always marked SEEN_FUNC and so
+ * gets a stack frame. The exception callback reuses the stack frame of
+ * the exception boundary program, so the exception boundary program
+ * must also have a frame.
*/
return ctx->seen & SEEN_FUNC ||
bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
- ctx->exception_cb;
+ ctx->exception_cb ||
+ ctx->exception_boundary;
}
/*
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
* [ tail_call_info ] 8
- * [ nv gpr save area ] 6*8
+ * [ nv gpr save area ] 6*8 + (12*8)
* [ local_tmp_var ] 24
* [ unused red zone ] 224
+ *
+ * The additional (12*8) bytes in the 'nv gpr save area' are present
+ * only at an exception boundary.
*/
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
} else {
/* Stack layout with redzone */
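+		/*
+		 * Deepest access below sp here is
+		 * 8 + (6*8 + 12*8) + 24 = 176 bytes at an exception
+		 * boundary (80 bytes otherwise), which must stay within
+		 * the red zone below the stack pointer.
+		 */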
- return -(BPF_PPC_TAILCALL + BPF_PPC_STACK_SAVE + BPF_PPC_STACK_LOCALS);
+		return -(BPF_PPC_TAILCALL + BPF_PPC_STACK_SAVE +
+			 (ctx->exception_boundary || ctx->exception_cb ?
+			  BPF_PPC_EXC_STACK_SAVE : 0) +
+			 BPF_PPC_STACK_LOCALS);
}
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
- if (reg >= BPF_PPC_NVR_MIN && reg < 32)
+ int min_valid_nvreg = BPF_PPC_NVR_MIN;
+ /* Default frame size for all cases except exception boundary */
+ int frame_nvr_size = BPF_PPC_STACKFRAME;
+
+	/* For exception handling, offsets are needed for all NVRs (r14-r31) */
+ if (ctx->exception_boundary || ctx->exception_cb) {
+ min_valid_nvreg = _R14;
+ frame_nvr_size = BPF_PPC_EXC_STACKFRAME;
+ }
+
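+	/*
+	 * NVRs sit directly below the tail_call_info slot at the top of
+	 * the frame: r31 is highest, and each lower-numbered register is
+	 * a further 8 bytes down.
+	 */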
+ if (reg >= min_valid_nvreg && reg < 32)
return (bpf_has_stack_frame(ctx) ?
- (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
+ (frame_nvr_size + ctx->stack_size) : 0)
- (8 * (32 - reg)) - BPF_PPC_TAILCALL;
pr_err("BPF JIT is asking about unknown registers");
{
}
+/*
+ * Return the stack frame size; for exception boundary and exception
+ * callback programs it is increased to accommodate the additional
+ * NVRs (r14-r25).
+ */
+static int bpf_jit_stack_size(struct codegen_context *ctx)
+{
+ return ctx->exception_boundary || ctx->exception_cb ?
+ BPF_PPC_EXC_STACKFRAME :
+ BPF_PPC_STACKFRAME;
+}
+
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
}
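+		/*
+		 * stdu stores the previous r1 (the back chain) at the base
+		 * of the new frame while moving r1 down by the full frame
+		 * size, which may include the extra NVR save area.
+		 */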
- EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
+ EMIT(PPC_RAW_STDU(_R1, _R1,
+ -(bpf_jit_stack_size(ctx) + ctx->stack_size)));
+ }
+
+	/*
+	 * A program acting as an exception boundary saves R14..R25 in
+	 * addition to the BPF callee-saved non-volatile registers. The
+	 * exception callback runs on the boundary program's stack frame,
+	 * so these additionally saved registers are restored in the
+	 * epilogue of the exception callback.
+	 */
+ if (ctx->exception_boundary) {
+ for (i = _R14; i <= _R25; i++)
+ EMIT(PPC_RAW_STD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
}
if (!ctx->exception_cb) {
EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
+ if (ctx->exception_cb) {
+		/*
+		 * Restore the additionally saved non-volatile registers from
+		 * the stack frame of the exception boundary program.
+		 */
+ for (i = _R14; i <= _R25; i++)
+ EMIT(PPC_RAW_LD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+ }
+
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx)) {
- EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
+ EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_jit_stack_size(ctx) + ctx->stack_size));
+
if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
EMIT(PPC_RAW_MTLR(_R0));