return round_up(max_t(u32, stack_depth, 1), 32);
}
+/* temporary state used for call frame depth calculation */
+struct bpf_subprog_call_depth_info {
+ int ret_insn; /* insn index in the caller to resume at after the callee returns */
+ int caller; /* caller subprogram idx */
+ int frame; /* # of consecutive static call stack frames on top of stack */
+};
+
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
- * Since recursion is prevented by check_cfg() this algorithm
- * only needs a local stack of MAX_CALL_FRAMES to remember callsites
*/
static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx,
+ struct bpf_subprog_call_depth_info *dinfo,
bool priv_stack_supported)
{
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int depth = 0, frame = 0, i, subprog_end, subprog_depth;
bool tail_call_reachable = false;
- int ret_insn[MAX_CALL_FRAMES];
- int ret_prog[MAX_CALL_FRAMES];
- int j;
+ int total;
+ int tmp;
+
+ /* the entry subprog has no caller */
+ dinfo[idx].caller = -1;
i = subprog[idx].start;
if (!priv_stack_supported)
} else {
depth += subprog_depth;
if (depth > MAX_BPF_STACK) {
+ total = 0;
+ for (tmp = idx; tmp >= 0; tmp = dinfo[tmp].caller)
+ total++;
+
verbose(env, "combined stack size of %d calls is %d. Too large\n",
- frame + 1, depth);
+ total, depth);
return -EACCES;
}
}
if (!is_bpf_throw_kfunc(insn + i))
continue;
- if (subprog[idx].is_cb)
- err = true;
- for (int c = 0; c < frame && !err; c++) {
- if (subprog[ret_prog[c]].is_cb) {
+ for (tmp = idx; tmp >= 0 && !err; tmp = dinfo[tmp].caller) {
+ if (subprog[tmp].is_cb) {
err = true;
break;
}
if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
continue;
/* remember insn and function to return to */
- ret_insn[frame] = i + 1;
- ret_prog[frame] = idx;
/* find the callee */
next_insn = i + insn[i].imm + 1;
return -EINVAL;
}
}
+
+ /* save the caller's state so it can be restored after the callee returns */
+ dinfo[idx].frame = frame;
+ dinfo[idx].ret_insn = i + 1;
+
+ /* push caller idx into callee's dinfo */
+ dinfo[sidx].caller = idx;
+
i = next_insn;
+
idx = sidx;
if (!priv_stack_supported)
subprog[idx].priv_stack_mode = NO_PRIV_STACK;
if (subprog[idx].has_tail_call)
tail_call_reachable = true;
- frame++;
+ frame = subprog_is_global(env, idx) ? 0 : frame + 1;
if (frame >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep !\n",
frame);
* tail call counter throughout bpf2bpf calls combined with tailcalls
*/
if (tail_call_reachable)
- for (j = 0; j < frame; j++) {
- if (subprog[ret_prog[j]].is_exception_cb) {
+ for (tmp = idx; tmp >= 0; tmp = dinfo[tmp].caller) {
+ if (subprog[tmp].is_exception_cb) {
verbose(env, "cannot tail call within exception cb\n");
return -EINVAL;
}
- subprog[ret_prog[j]].tail_call_reachable = true;
+ subprog[tmp].tail_call_reachable = true;
}
if (subprog[0].tail_call_reachable)
env->prog->aux->tail_call_reachable = true;
/* end of for() loop means the last insn of the 'subprog'
* was reached. Doesn't matter whether it was JA or EXIT
*/
- if (frame == 0)
+ if (frame == 0 && dinfo[idx].caller < 0)
return 0;
if (subprog[idx].priv_stack_mode != PRIV_STACK_ADAPTIVE)
depth -= round_up_stack_depth(env, subprog[idx].stack_depth);
- frame--;
- i = ret_insn[frame];
- idx = ret_prog[frame];
+
+ /* return to the caller recorded in the callee's dinfo entry */
+ idx = dinfo[idx].caller;
+
+ /* restore the caller's saved frame count and resume insn index */
+ frame = dinfo[idx].frame;
+ i = dinfo[idx].ret_insn;
+
goto continue_func;
}
static int check_max_stack_depth(struct bpf_verifier_env *env)
{
enum priv_stack_mode priv_stack_mode = PRIV_STACK_UNKNOWN;
+ struct bpf_subprog_call_depth_info *dinfo;
struct bpf_subprog_info *si = env->subprog_info;
bool priv_stack_supported;
int ret;
+ dinfo = kvcalloc(env->subprog_cnt, sizeof(*dinfo), GFP_KERNEL_ACCOUNT);
+ if (!dinfo)
+ return -ENOMEM;
+
for (int i = 0; i < env->subprog_cnt; i++) {
if (si[i].has_tail_call) {
priv_stack_mode = NO_PRIV_STACK;
for (int i = env->subprog_cnt - 1; i >= 0; i--) {
if (!i || si[i].is_async_cb) {
priv_stack_supported = !i && priv_stack_mode == PRIV_STACK_ADAPTIVE;
- ret = check_max_stack_depth_subprog(env, i, priv_stack_supported);
- if (ret < 0)
+ ret = check_max_stack_depth_subprog(env, i, dinfo,
+ priv_stack_supported);
+ if (ret < 0) {
+ kvfree(dinfo);
return ret;
+ }
}
}
}
}
+ kvfree(dinfo);
+
return 0;
}
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f1(struct __sk_buff *skb)
{
return skb->len;
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
return f1(skb) + val;
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f3(int val, struct __sk_buff *skb, int var)
{
return f2(var, skb) + val;
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f4(struct __sk_buff *skb)
{
return f3(1, skb, 2);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f5(struct __sk_buff *skb)
{
return f4(skb);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f6(struct __sk_buff *skb)
{
return f5(skb);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f7(struct __sk_buff *skb)
{
return f6(skb);
}
-__attribute__ ((noinline))
+static __attribute__ ((noinline))
int f8(struct __sk_buff *skb)
{
return f7(skb);
}
SEC("tc")
-__failure __msg("the call stack of 8 frames")
+__failure __msg("the call stack of 9 frames")
int global_func3(struct __sk_buff *skb)
{
return f8(skb);