bpf: Move bpf map owner out of common struct
author    Daniel Borkmann <daniel@iogearbox.net>
Wed, 30 Jul 2025 23:47:31 +0000 (01:47 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 15 Aug 2025 14:39:14 +0000 (16:39 +0200)
[ Upstream commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53 ]

Given this is only relevant for BPF tail call maps, keeping it in the common
struct adds space and penalizes all other map types. We also need to extend
it with further objects to track and compare against. Therefore, let's move
this out into a separate structure and dynamically allocate it only for BPF
tail call maps.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Stable-dep-of: abad3d0bad72 ("bpf: Fix oob access in cgroup local storage")
Signed-off-by: Sasha Levin <sashal@kernel.org>
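In essence, the owner bookkeeping embedded in struct bpf_map becomes a pointer
to a separately allocated struct bpf_map_owner, guarded by the map's new
owner_lock, and the record is created lazily by the first program that uses
the map. Below is a minimal userspace sketch of that pattern; it mirrors the
flow of __bpf_prog_map_compatible() in the diff but is illustrative only, and
the names owner_info, map_like and map_compatible are hypothetical, not kernel
APIs.

    /* Illustrative userspace sketch (not kernel code): a lazily allocated
     * owner record behind a lock, mirroring the pattern this patch
     * introduces for struct bpf_map. All names here are hypothetical.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct owner_info {                /* stands in for struct bpf_map_owner */
            int prog_type;
            bool jited;
    };

    struct map_like {                  /* stands in for struct bpf_map */
            pthread_mutex_t owner_lock;
            struct owner_info *owner;  /* NULL until a program claims it */
    };

    /* The first caller claims ownership; later callers must match it. */
    static bool map_compatible(struct map_like *map, int prog_type, bool jited)
    {
            bool ret = false;

            pthread_mutex_lock(&map->owner_lock);
            if (!map->owner) {
                    map->owner = calloc(1, sizeof(*map->owner));
                    if (!map->owner)
                            goto out;
                    map->owner->prog_type = prog_type;
                    map->owner->jited = jited;
                    ret = true;
            } else {
                    ret = map->owner->prog_type == prog_type &&
                          map->owner->jited == jited;
            }
    out:
            pthread_mutex_unlock(&map->owner_lock);
            return ret;
    }

In the kernel change below, the allocation uses kzalloc() with GFP_ATOMIC
because it happens while owner_lock (a spinlock) is held, and the record is
freed together with the map in bpf_map_free_deferred().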
include/linux/bpf.h
kernel/bpf/core.c
kernel/bpf/syscall.c

include/linux/bpf.h
index f9900a23ca16a4f70af30806f294fe5513505a8c..a2876101f9b64a2c8988818a7c7631a508743bca 100644
@@ -260,6 +260,18 @@ struct bpf_list_node_kern {
        void *owner;
 } __attribute__((aligned(8)));
 
+/* 'Ownership' of program-containing map is claimed by the first program
+ * that is going to use this map or by the first program which FD is
+ * stored in the map to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+struct bpf_map_owner {
+       enum bpf_prog_type type;
+       bool jited;
+       bool xdp_has_frags;
+       const struct btf_type *attach_func_proto;
+};
+
 struct bpf_map {
        const struct bpf_map_ops *ops;
        struct bpf_map *inner_map_meta;
@@ -292,18 +304,8 @@ struct bpf_map {
                struct rcu_head rcu;
        };
        atomic64_t writecnt;
-       /* 'Ownership' of program-containing map is claimed by the first program
-        * that is going to use this map or by the first program which FD is
-        * stored in the map to make sure that all callers and callees have the
-        * same prog type, JITed flag and xdp_has_frags flag.
-        */
-       struct {
-               const struct btf_type *attach_func_proto;
-               spinlock_t lock;
-               enum bpf_prog_type type;
-               bool jited;
-               bool xdp_has_frags;
-       } owner;
+       spinlock_t owner_lock;
+       struct bpf_map_owner *owner;
        bool bypass_spec_v1;
        bool frozen; /* write-once; write-protected by freeze_mutex */
        bool free_after_mult_rcu_gp;
@@ -2072,6 +2074,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
               (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
 }
 
+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+       return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+       kfree(map->owner);
+}
+
 struct bpf_event_entry {
        struct perf_event *event;
        struct file *perf_file;
kernel/bpf/core.c
index 93e49b0c218ba93492be223ea57a4d330ecc7506..9abc37739ca5f321f989264948103061e28eb07d 100644
@@ -2365,28 +2365,29 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
                                      const struct bpf_prog *fp)
 {
        enum bpf_prog_type prog_type = resolve_prog_type(fp);
-       bool ret;
        struct bpf_prog_aux *aux = fp->aux;
+       bool ret = false;
 
        if (fp->kprobe_override)
-               return false;
+               return ret;
 
-       spin_lock(&map->owner.lock);
-       if (!map->owner.type) {
-               /* There's no owner yet where we could check for
-                * compatibility.
-                */
-               map->owner.type  = prog_type;
-               map->owner.jited = fp->jited;
-               map->owner.xdp_has_frags = aux->xdp_has_frags;
-               map->owner.attach_func_proto = aux->attach_func_proto;
+       spin_lock(&map->owner_lock);
+       /* There's no owner yet where we could check for compatibility. */
+       if (!map->owner) {
+               map->owner = bpf_map_owner_alloc(map);
+               if (!map->owner)
+                       goto err;
+               map->owner->type  = prog_type;
+               map->owner->jited = fp->jited;
+               map->owner->xdp_has_frags = aux->xdp_has_frags;
+               map->owner->attach_func_proto = aux->attach_func_proto;
                ret = true;
        } else {
-               ret = map->owner.type  == prog_type &&
-                     map->owner.jited == fp->jited &&
-                     map->owner.xdp_has_frags == aux->xdp_has_frags;
+               ret = map->owner->type  == prog_type &&
+                     map->owner->jited == fp->jited &&
+                     map->owner->xdp_has_frags == aux->xdp_has_frags;
                if (ret &&
-                   map->owner.attach_func_proto != aux->attach_func_proto) {
+                   map->owner->attach_func_proto != aux->attach_func_proto) {
                        switch (prog_type) {
                        case BPF_PROG_TYPE_TRACING:
                        case BPF_PROG_TYPE_LSM:
@@ -2399,8 +2400,8 @@ static bool __bpf_prog_map_compatible(struct bpf_map *map,
                        }
                }
        }
-       spin_unlock(&map->owner.lock);
-
+err:
+       spin_unlock(&map->owner_lock);
        return ret;
 }
 
kernel/bpf/syscall.c
index 82ae4fadecf03cea49edef2fe3a0325c84676217..88511a9bc114a029b4affb43e1ddc2ca258896bb 100644
@@ -887,6 +887,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
 
        security_bpf_map_free(map);
        bpf_map_release_memcg(map);
+       bpf_map_owner_free(map);
        bpf_map_free(map);
 }
 
@@ -981,12 +982,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
        struct bpf_map *map = filp->private_data;
        u32 type = 0, jited = 0;
 
-       if (map_type_contains_progs(map)) {
-               spin_lock(&map->owner.lock);
-               type  = map->owner.type;
-               jited = map->owner.jited;
-               spin_unlock(&map->owner.lock);
+       spin_lock(&map->owner_lock);
+       if (map->owner) {
+               type  = map->owner->type;
+               jited = map->owner->jited;
        }
+       spin_unlock(&map->owner_lock);
 
        seq_printf(m,
                   "map_type:\t%u\n"
@@ -1496,7 +1497,7 @@ static int map_create(union bpf_attr *attr, bool kernel)
        atomic64_set(&map->refcnt, 1);
        atomic64_set(&map->usercnt, 1);
        mutex_init(&map->freeze_mutex);
-       spin_lock_init(&map->owner.lock);
+       spin_lock_init(&map->owner_lock);
 
        if (attr->btf_key_type_id || attr->btf_value_type_id ||
            /* Even the map's value is a kernel's struct,