}
static int process_timer_func(struct bpf_verifier_env *env, int regno,
-			      struct bpf_call_arg_meta *meta)
+			      struct bpf_map_desc *map)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
		return -EOPNOTSUPP;
	}
-	return check_map_field_pointer(env, regno, BPF_TIMER, &meta->map);
+	return check_map_field_pointer(env, regno, BPF_TIMER, map);
+}
+
+static int process_timer_helper(struct bpf_verifier_env *env, int regno,
+				struct bpf_call_arg_meta *meta)
+{
+	return process_timer_func(env, regno, &meta->map);
+}
+
+static int process_timer_kfunc(struct bpf_verifier_env *env, int regno,
+			       struct bpf_kfunc_call_arg_meta *meta)
+{
+	return process_timer_func(env, regno, &meta->map);
}

static int process_kptr_func(struct bpf_verifier_env *env, int regno,
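check_map_field_pointer() itself is introduced earlier in the series and is not part of this excerpt. As a rough sketch of the validation it centralizes, based on what mainline's process_timer_func() open-codes today (the function name below is made up for illustration, not the series' actual helper):

/* Illustration only: the register must hold a constant offset that lands
 * exactly on the struct bpf_timer field recorded in the map's btf_record.
 */
static int timer_field_pointer_sketch(struct bpf_verifier_env *env, int regno)
{
	struct bpf_reg_state *reg = &cur_regs(env)[regno];
	struct bpf_map *map = reg->map_ptr;
	u64 off = reg->var_off.value + reg->off;

	if (!tnum_is_const(reg->var_off))
		return -EINVAL; /* bpf_timer must sit at a constant offset */
	if (!btf_record_has_field(map->record, BPF_TIMER)) {
		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
		return -EINVAL;
	}
	if (map->record->timer_off != off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
			off, map->record->timer_off);
		return -EINVAL;
	}
	return 0;
}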
		}
		break;
	case ARG_PTR_TO_TIMER:
-		err = process_timer_func(env, regno, meta);
+		err = process_timer_helper(env, regno, meta);
		if (err)
			return err;
		break;
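For orientation, this is the program-side pattern that the ARG_PTR_TO_TIMER path keeps validating after the refactor. A minimal sketch modeled on the existing bpf_timer selftests; the map, section, and function names are illustrative, not part of the patch:

#include <linux/bpf.h>
#include <time.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* A struct bpf_timer embedded in the map value; its offset is recorded in
 * the map's btf_record when the map is created.
 */
struct map_elem {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_elem);
} timer_map SEC(".maps");

/* Callback invoked when the timer fires; it receives the map, key and value. */
static int timer_cb(void *map, int *key, struct map_elem *val)
{
	return 0;
}

SEC("tc")
int arm_timer(struct __sk_buff *skb)
{
	int key = 0;
	struct map_elem *val;

	val = bpf_map_lookup_elem(&timer_map, &key);
	if (!val)
		return 0;
	/* Each &val->timer argument below is ARG_PTR_TO_TIMER and therefore
	 * goes through process_timer_helper() above.
	 */
	bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->timer, timer_cb);
	bpf_timer_start(&val->timer, 0 /* nsecs */, 0);
	return 0;
}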
	KF_ARG_WORKQUEUE_ID,
	KF_ARG_RES_SPIN_LOCK_ID,
	KF_ARG_TASK_WORK_ID,
-	KF_ARG_PROG_AUX_ID
+	KF_ARG_PROG_AUX_ID,
+	KF_ARG_TIMER_ID
};
BTF_ID_LIST(kf_arg_btf_ids)
BTF_ID(struct, bpf_res_spin_lock)
BTF_ID(struct, bpf_task_work)
BTF_ID(struct, bpf_prog_aux)
+BTF_ID(struct, bpf_timer)
static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
				    const struct btf_param *arg, int type)
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
}

+static bool is_kfunc_arg_timer(const struct btf *btf, const struct btf_param *arg)
+{
+	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_TIMER_ID);
+}
+
static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg)
{
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_WORKQUEUE_ID);
	KF_ARG_PTR_TO_NULL,
	KF_ARG_PTR_TO_CONST_STR,
	KF_ARG_PTR_TO_MAP,
+	KF_ARG_PTR_TO_TIMER,
	KF_ARG_PTR_TO_WORKQUEUE,
	KF_ARG_PTR_TO_IRQ_FLAG,
	KF_ARG_PTR_TO_RES_SPIN_LOCK,
	if (is_kfunc_arg_wq(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_WORKQUEUE;

+	if (is_kfunc_arg_timer(meta->btf, &args[argno]))
+		return KF_ARG_PTR_TO_TIMER;
+
	if (is_kfunc_arg_task_work(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_TASK_WORK;
		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
		case KF_ARG_PTR_TO_CONST_STR:
		case KF_ARG_PTR_TO_WORKQUEUE:
+		case KF_ARG_PTR_TO_TIMER:
		case KF_ARG_PTR_TO_TASK_WORK:
		case KF_ARG_PTR_TO_IRQ_FLAG:
		case KF_ARG_PTR_TO_RES_SPIN_LOCK:
			if (ret < 0)
				return ret;
			break;
+		case KF_ARG_PTR_TO_TIMER:
+			if (reg->type != PTR_TO_MAP_VALUE) {
+				verbose(env, "arg#%d doesn't point to a map value\n", i);
+				return -EINVAL;
+			}
+			ret = process_timer_kfunc(env, regno, meta);
+			if (ret < 0)
+				return ret;
+			break;
		case KF_ARG_PTR_TO_TASK_WORK:
			if (reg->type != PTR_TO_MAP_VALUE) {
				verbose(env, "arg#%d doesn't point to a map value\n", i);
		}
	}

-	if (btf_record_has_field(map->record, BPF_TIMER)) {
-		if (is_tracing_prog_type(prog_type)) {
-			verbose(env, "tracing progs cannot use bpf_timer yet\n");
-			return -EINVAL;
-		}
-	}
-
-	if (btf_record_has_field(map->record, BPF_WORKQUEUE)) {
-		if (is_tracing_prog_type(prog_type)) {
-			verbose(env, "tracing progs cannot use bpf_wq yet\n");
-			return -EINVAL;
-		}
-	}
-
	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
	    !bpf_offload_prog_map_match(prog, map)) {
		verbose(env, "offload device mismatch between prog and map\n");