git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bpf: Add helper and kfunc stack access size resolution
author: Alexei Starovoitov <ast@kernel.org>
Fri, 3 Apr 2026 02:44:21 +0000 (19:44 -0700)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 3 Apr 2026 15:34:44 +0000 (08:34 -0700)
The static stack liveness analysis needs to know how many bytes a
helper or kfunc accesses through a stack pointer argument, so it can
precisely mark the affected stack slots as stack 'def' or 'use'.

Add bpf_helper_stack_access_bytes() and bpf_kfunc_stack_access_bytes()
which resolve the access size for a given call argument.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20260403024422.87231-7-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_verifier.h
kernel/bpf/verifier.c

index 7bd32a8a45f6ab0e59463c9d87b2af45f5231bdd..36bfd96d4563087209d73dfcfe43786df0184256 100644 (file)
@@ -1138,6 +1138,12 @@ bool bpf_is_may_goto_insn(struct bpf_insn *insn);
 void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
 bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
                          struct bpf_call_summary *cs);
+s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env,
+                                 struct bpf_insn *insn, int arg,
+                                 int insn_idx);
+s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env,
+                                struct bpf_insn *insn, int arg,
+                                int insn_idx);
 
 int bpf_stack_liveness_init(struct bpf_verifier_env *env);
 void bpf_stack_liveness_free(struct bpf_verifier_env *env);
index 7d4d0f7e2ca19e4bdb98d39d6938a13195ad603b..84699a428077d66f68203cdcccddee5f8207cd34 100644 (file)
@@ -14132,6 +14132,194 @@ static int fetch_kfunc_arg_meta(struct bpf_verifier_env *env,
        return 0;
 }
 
+/*
+ * Determine how many bytes a helper accesses through a stack pointer at
+ * argument position @arg (0-based, corresponding to R1-R5).
+ *
+ * Returns:
+ *   > 0   known read access size in bytes
+ *     0   doesn't read anything directly
+ * S64_MIN unknown
+ *   < 0   known write access of (-return) bytes
+ */
+s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env, struct bpf_insn *insn,
+                                 int arg, int insn_idx)
+{
+       struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+       const struct bpf_func_proto *fn;
+       enum bpf_arg_type at;
+       s64 size;
+
+       /* Unknown/invalid helper id: cannot say anything about its accesses */
+       if (get_helper_proto(env, insn->imm, &fn) < 0)
+               return S64_MIN;
+
+       at = fn->arg_type[arg];
+
+       switch (base_type(at)) {
+       case ARG_PTR_TO_MAP_KEY:
+       case ARG_PTR_TO_MAP_VALUE: {
+               bool is_key = base_type(at) == ARG_PTR_TO_MAP_KEY;
+               u64 val;
+               int i, map_reg;
+
+               /*
+                * Key/value size comes from the map argument, which by helper
+                * calling convention precedes the key/value pointer argument.
+                * Find that ARG_CONST_MAP_PTR position first.
+                */
+               for (i = 0; i < arg; i++) {
+                       if (base_type(fn->arg_type[i]) == ARG_CONST_MAP_PTR)
+                               break;
+               }
+               if (i >= arg)
+                       goto scan_all_maps;
+
+               map_reg = BPF_REG_1 + i;
+
+               /*
+                * NOTE(review): presumably const_reg_map_mask marks registers
+                * holding the same constant map pointer on every path reaching
+                * this insn, with const_reg_vals[] an index into used_maps[] —
+                * confirm against where aux->const_reg_* is populated.
+                */
+               if (!(aux->const_reg_map_mask & BIT(map_reg)))
+                       goto scan_all_maps;
+
+               i = aux->const_reg_vals[map_reg];
+               /* Bounds-check the recorded index before dereferencing */
+               if (i < env->used_map_cnt) {
+                       size = is_key ? env->used_maps[i]->key_size
+                                     : env->used_maps[i]->value_size;
+                       goto out;
+               }
+scan_all_maps:
+               /*
+                * Map pointer is not known at this call site (e.g. different
+                * maps on merged paths).  Conservatively return the largest
+                * key_size or value_size across all maps used by the program.
+                */
+               val = 0;
+               for (i = 0; i < env->used_map_cnt; i++) {
+                       struct bpf_map *map = env->used_maps[i];
+                       u32 sz = is_key ? map->key_size : map->value_size;
+
+                       if (sz > val)
+                               val = sz;
+                       /* Map-in-map: inner map sizes count toward the bound too */
+                       if (map->inner_map_meta) {
+                               sz = is_key ? map->inner_map_meta->key_size
+                                           : map->inner_map_meta->value_size;
+                               if (sz > val)
+                                       val = sz;
+                       }
+               }
+               /* No maps at all: no bound can be derived */
+               if (!val)
+                       return S64_MIN;
+               size = val;
+               goto out;
+       }
+       case ARG_PTR_TO_MEM:
+               /* Size baked into the proto itself (fixed-size mem arg) */
+               if (at & MEM_FIXED_SIZE) {
+                       size = fn->arg_size[arg];
+                       goto out;
+               }
+               /* Otherwise the next argument, if a mem-size arg, carries it */
+               if (arg + 1 < ARRAY_SIZE(fn->arg_type) &&
+                   arg_type_is_mem_size(fn->arg_type[arg + 1])) {
+                       int size_reg = BPF_REG_1 + arg + 1;
+
+                       if (aux->const_reg_mask & BIT(size_reg)) {
+                               size = (s64)aux->const_reg_vals[size_reg];
+                               goto out;
+                       }
+                       /*
+                        * Size arg is const on each path but differs across merged
+                        * paths. MAX_BPF_STACK is a safe upper bound for reads.
+                        */
+                       if (at & MEM_UNINIT)
+                               return 0;
+                       return MAX_BPF_STACK;
+               }
+               return S64_MIN;
+       case ARG_PTR_TO_DYNPTR:
+               /* On-stack dynptr has a fixed-size representation */
+               size = BPF_DYNPTR_SIZE;
+               break;
+       case ARG_PTR_TO_STACK:
+               /*
+                * Only used by bpf_calls_callback() helpers. The helper itself
+                * doesn't access stack. The callback subprog does and it's
+                * analyzed separately.
+                */
+               return 0;
+       default:
+               return S64_MIN;
+       }
+out:
+       /*
+        * MEM_UNINIT args are write-only: the helper initializes the
+        * buffer without reading it.
+        */
+       if (at & MEM_UNINIT)
+               return -size;
+       return size;
+}
+
+/*
+ * Determine how many bytes a kfunc accesses through a stack pointer at
+ * argument position @arg (0-based, corresponding to R1-R5).
+ *
+ * Returns:
+ *   > 0      known read access size in bytes
+ *     0      doesn't access memory through that argument (ex: not a pointer)
+ *   S64_MIN  unknown
+ *   < 0      known write access of (-return) bytes
+ */
+s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env, struct bpf_insn *insn,
+                                int arg, int insn_idx)
+{
+       struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+       struct bpf_kfunc_call_arg_meta meta;
+       const struct btf_param *args;
+       const struct btf_type *t, *ref_t;
+       const struct btf *btf;
+       u32 nargs, type_size;
+       s64 size;
+
+       /* Resolve the kfunc's BTF descriptor; bail out if it can't be found */
+       if (fetch_kfunc_arg_meta(env, insn->imm, insn->off, &meta) < 0)
+               return S64_MIN;
+
+       btf = meta.btf;
+       args = btf_params(meta.func_proto);
+       nargs = btf_type_vlen(meta.func_proto);
+       /* Past the declared argument list: nothing is accessed via this reg */
+       if (arg >= nargs)
+               return 0;
+
+       /* Non-pointer arguments can't reference stack memory at all */
+       t = btf_type_skip_modifiers(btf, args[arg].type, NULL);
+       if (!btf_type_is_ptr(t))
+               return 0;
+
+       /* dynptr: fixed 16-byte on-stack representation */
+       if (is_kfunc_arg_dynptr(btf, &args[arg])) {
+               size = BPF_DYNPTR_SIZE;
+               goto out;
+       }
+
+       /* ptr + __sz/__szk pair: size is in the next register */
+       if (arg + 1 < nargs &&
+           (btf_param_match_suffix(btf, &args[arg + 1], "__sz") ||
+            btf_param_match_suffix(btf, &args[arg + 1], "__szk"))) {
+               int size_reg = BPF_REG_1 + arg + 1;
+
+               /* Size reg is the same constant on all paths: exact answer */
+               if (aux->const_reg_mask & BIT(size_reg)) {
+                       size = (s64)aux->const_reg_vals[size_reg];
+                       goto out;
+               }
+               /*
+                * Size differs across merged paths; MAX_BPF_STACK is a safe
+                * upper bound. Note this returns directly, so the write-only
+                * (negative) conversion at 'out' is deliberately skipped.
+                */
+               return MAX_BPF_STACK;
+       }
+
+       /* fixed-size pointed-to type: resolve via BTF */
+       ref_t = btf_type_skip_modifiers(btf, t->type, NULL);
+       if (!IS_ERR(btf_resolve_size(btf, ref_t, &type_size))) {
+               size = type_size;
+               goto out;
+       }
+
+       return S64_MIN;
+out:
+       /* KF_ITER_NEW kfuncs initialize the iterator state at arg 0 */
+       if (arg == 0 && meta.kfunc_flags & KF_ITER_NEW)
+               return -size;
+       /* BTF-annotated uninit args are write-only, mirroring MEM_UNINIT */
+       if (is_kfunc_arg_uninit(btf, &args[arg]))
+               return -size;
+       return size;
+}
+
 /* check special kfuncs and return:
  *  1  - not fall-through to 'else' branch, continue verification
  *  0  - fall-through to 'else' branch