static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
+static bool is_task_work_add_kfunc(u32 func_id);
static bool is_async_callback_calling_insn(struct bpf_insn *insn)
{
	return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) ||
	       (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
}
+static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn *insn)
+{
+ /* bpf_timer callbacks are never sleepable. */
+ if (bpf_helper_call(insn) && insn->imm == BPF_FUNC_timer_set_callback)
+ return false;
+
+ /* bpf_wq and bpf_task_work callbacks are always sleepable. */
+ if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
+ (is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm)))
+ return true;
+
+ verifier_bug(env, "unhandled async callback in is_async_cb_sleepable");
+ return false;
+}
+
static bool is_may_goto_insn(struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
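
As a quick cross-check of the classification above, here is a minimal userspace model of is_async_cb_sleepable()'s decision table; the enums and struct are stand-ins, not kernel definitions:

	#include <assert.h>
	#include <stdbool.h>

	/* Stand-ins for bpf_helper_call()/bpf_pseudo_kfunc_call() and the IDs. */
	enum call_kind { HELPER_CALL, KFUNC_CALL };
	enum async_id { TIMER_SET_CALLBACK, WQ_SET_CALLBACK, TASK_WORK_ADD };

	struct model_insn {
		enum call_kind kind;
		enum async_id id;
	};

	static bool model_async_cb_sleepable(struct model_insn insn)
	{
		/* bpf_timer callbacks run from hrtimer softirq: never sleepable. */
		if (insn.kind == HELPER_CALL && insn.id == TIMER_SET_CALLBACK)
			return false;
		/* bpf_wq and bpf_task_work callbacks run in process context. */
		if (insn.kind == KFUNC_CALL &&
		    (insn.id == WQ_SET_CALLBACK || insn.id == TASK_WORK_ADD))
			return true;
		/* The verifier reports a verifier_bug() here; model it as false. */
		return false;
	}

	int main(void)
	{
		assert(!model_async_cb_sleepable((struct model_insn){ HELPER_CALL, TIMER_SET_CALLBACK }));
		assert(model_async_cb_sleepable((struct model_insn){ KFUNC_CALL, WQ_SET_CALLBACK }));
		assert(model_async_cb_sleepable((struct model_insn){ KFUNC_CALL, TASK_WORK_ADD }));
		return 0;
	}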
static bool in_sleepable(struct bpf_verifier_env *env)
{
- return env->prog->sleepable ||
- (env->cur_state && env->cur_state->in_sleepable);
+ return env->cur_state->in_sleepable;
}
/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
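
The point of this hunk is that sleepability becomes a property of the verifier state rather than of the program: the root state is seeded from prog->sleepable (see the state-initialization hunk at the end of this excerpt) and each async callback state overrides it with the callback's own sleepability. A minimal userspace sketch of that flow, with illustrative names:

	#include <assert.h>
	#include <stdbool.h>

	struct model_state { bool in_sleepable; };
	struct model_prog  { bool sleepable; };

	/* Root state: inherits the program's sleepability (was the global check). */
	static struct model_state make_root_state(const struct model_prog *prog)
	{
		return (struct model_state){ .in_sleepable = prog->sleepable };
	}

	/* Async callback state: takes the result of is_async_cb_sleepable(). */
	static struct model_state push_async_cb_state(bool cb_sleepable)
	{
		return (struct model_state){ .in_sleepable = cb_sleepable };
	}

	int main(void)
	{
		struct model_prog prog = { .sleepable = true };
		struct model_state main_body = make_root_state(&prog);
		struct model_state timer_cb  = push_async_cb_state(false); /* bpf_timer */
		struct model_state wq_cb     = push_async_cb_state(true);  /* bpf_wq */

		assert(main_body.in_sleepable);  /* sleepable program body may block */
		assert(!timer_cb.in_sleepable);  /* its timer callback may not */
		assert(wq_cb.in_sleepable);      /* its wq callback may */
		return 0;
	}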
struct bpf_func_state *callee,
int insn_idx);
-static bool is_task_work_add_kfunc(u32 func_id);
-
static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx);
env->subprog_info[subprog].is_async_cb = true;
async_cb = push_async_cb(env, env->subprog_info[subprog].start,
insn_idx, subprog,
- is_bpf_wq_set_callback_impl_kfunc(insn->imm) ||
- is_task_work_add_kfunc(insn->imm));
+ is_async_cb_sleepable(env, insn));
if (!async_cb)
return -EFAULT;
callee = async_cb->frame[0];
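
With the sleepability decision centralized in is_async_cb_sleepable(), the user-visible behavior for bpf_wq is unchanged: its callback is still verified as sleepable. A BPF-side sketch in the style of the kernel selftests; the kfunc declarations are abbreviated from tools/testing/selftests/bpf/bpf_experimental.h, and the map and section names are illustrative, so treat exact signatures as assumptions:

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	/* Abbreviated from bpf_experimental.h; signatures are assumptions. */
	extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __ksym;
	extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __ksym;
	extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
					    int (callback_fn)(void *map, int *key, void *value),
					    unsigned int flags, void *aux__ign) __ksym;
	#define bpf_wq_set_callback(wq, cb, flags) \
		bpf_wq_set_callback_impl(wq, cb, flags, (void *)0)

	struct elem {
		struct bpf_wq wq;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct elem);
	} array SEC(".maps");

	/* Runs from a workqueue, i.e. process context: verified as sleepable. */
	static int wq_cb(void *map, int *key, void *value)
	{
		return 0;
	}

	SEC("tc")
	int schedule_wq(void *ctx)
	{
		int key = 0;
		struct elem *val = bpf_map_lookup_elem(&array, &key);

		if (!val)
			return 0;
		if (bpf_wq_init(&val->wq, &array, 0))
			return 0;
		if (bpf_wq_set_callback(&val->wq, wq_cb, 0))
			return 0;
		bpf_wq_start(&val->wq, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";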
return -EINVAL;
}
- if (in_sleepable(env) && is_storage_get_function(func_id))
+ if (is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
return -EINVAL;
}
- if (in_sleepable(env) && is_storage_get_function(func_id))
+ if (is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
return -EINVAL;
}
- if (in_sleepable(env) && is_storage_get_function(func_id))
+ if (is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
+ /*
+  * Any non-sleepable context must use GFP_ATOMIC for storage_get
+  * helpers: non-sleepable programs, and non-sleepable callbacks
+  * (e.g., bpf_timer callbacks) of sleepable programs alike.
+  */
+ if (!in_sleepable(env) && is_storage_get_function(func_id))
+ env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
+
meta.func_id = func_id;
/* check args */
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
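
Taken together with the three locked-region hunks above, the per-callsite flag now encodes the full rule: storage_get must use GFP_ATOMIC whenever the context cannot block, either because a lock is held (RCU, preempt-disable, IRQ-save) or because the state itself is non-sleepable. A compact userspace model of that rule, with illustrative names:

	#include <assert.h>
	#include <stdbool.h>

	enum model_gfp { MODEL_GFP_KERNEL, MODEL_GFP_ATOMIC };

	/* Mirrors storage_get_func_atomic: set in any locked region and in
	 * any non-sleepable state; the fixup then keys off the flag alone. */
	static enum model_gfp storage_get_gfp(bool in_sleepable, bool in_locked_region)
	{
		bool atomic = in_locked_region || !in_sleepable;

		return atomic ? MODEL_GFP_ATOMIC : MODEL_GFP_KERNEL;
	}

	int main(void)
	{
		/* sleepable state, no locks held: allocation may block */
		assert(storage_get_gfp(true, false) == MODEL_GFP_KERNEL);
		/* sleepable state inside an RCU/preempt/IRQ section: must not block */
		assert(storage_get_gfp(true, true) == MODEL_GFP_ATOMIC);
		/* non-sleepable state, e.g. a timer callback of a sleepable prog */
		assert(storage_get_gfp(false, false) == MODEL_GFP_ATOMIC);
		return 0;
	}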
}
if (is_storage_get_function(insn->imm)) {
- if (!in_sleepable(env) ||
- env->insn_aux_data[i + delta].storage_get_func_atomic)
+ if (env->insn_aux_data[i + delta].storage_get_func_atomic)
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
else
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
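
The fixup materializes the hidden fifth (gfp_t) argument of the storage_get helpers into R5 ahead of the call, and with the hunk above the choice is driven entirely by the per-callsite flag. Illustratively (hand-written disassembly, not tool output):

	/* callsite with storage_get_func_atomic set */
	r5 = GFP_ATOMIC
	call bpf_task_storage_get

	/* callsite without the flag (sleepable, unlocked) */
	r5 = GFP_KERNEL
	call bpf_task_storage_get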
state->curframe = 0;
state->speculative = false;
state->branches = 1;
+ state->in_sleepable = env->prog->sleepable;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT);
if (!state->frame[0]) {
kfree(state);