git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: Move bpf map owner out of common struct
author: Daniel Borkmann <daniel@iogearbox.net>
Mon, 1 Sep 2025 17:34:50 +0000 (13:34 -0400)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 9 Sep 2025 16:54:11 +0000 (18:54 +0200)
[ Upstream commit fd1c98f0ef5cbcec842209776505d9e70d8fcd53 ]

Given this is only relevant for BPF tail call maps, it is adding up space
and penalizing other map types. We also need to extend this with further
objects to track / compare to. Therefore, let's move this out into a separate
structure and dynamically allocate it only for BPF tail call maps.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/r/20250730234733.530041-2-daniel@iogearbox.net
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/bpf.h
kernel/bpf/core.c
kernel/bpf/syscall.c

index 9fac355afde7ab44850560059f3fb9093599d267..8f11c61606839c07b4ed2f904671324de12bac62 100644 (file)
@@ -217,6 +217,18 @@ struct bpf_map_off_arr {
        u8 field_sz[BPF_MAP_OFF_ARR_MAX];
 };
 
+/* 'Ownership' of program-containing map is claimed by the first program
+ * that is going to use this map or by the first program which FD is
+ * stored in the map to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+struct bpf_map_owner {
+       enum bpf_prog_type type;
+       bool jited;
+       bool xdp_has_frags;
+       const struct btf_type *attach_func_proto;
+};
+
 struct bpf_map {
        /* The first two cachelines with read-mostly members of which some
         * are also accessed in fast-path (e.g. ops, max_entries).
@@ -258,18 +270,8 @@ struct bpf_map {
        };
        struct mutex freeze_mutex;
        atomic64_t writecnt;
-       /* 'Ownership' of program-containing map is claimed by the first program
-        * that is going to use this map or by the first program which FD is
-        * stored in the map to make sure that all callers and callees have the
-        * same prog type, JITed flag and xdp_has_frags flag.
-        */
-       struct {
-               const struct btf_type *attach_func_proto;
-               spinlock_t lock;
-               enum bpf_prog_type type;
-               bool jited;
-               bool xdp_has_frags;
-       } owner;
+       spinlock_t owner_lock;
+       struct bpf_map_owner *owner;
        bool bypass_spec_v1;
        bool frozen; /* write-once; write-protected by freeze_mutex */
        bool free_after_mult_rcu_gp;
@@ -1495,6 +1497,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
               (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
 }
 
+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+       return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+       kfree(map->owner);
+}
+
 struct bpf_event_entry {
        struct perf_event *event;
        struct file *perf_file;
index 2ed1d00bede0b25cecdd5e50c94125963bad965e..d4eb6d9f276a5ed9121ae8472b31ad3d04119c5d 100644 (file)
@@ -2120,28 +2120,29 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
                             const struct bpf_prog *fp)
 {
        enum bpf_prog_type prog_type = resolve_prog_type(fp);
-       bool ret;
        struct bpf_prog_aux *aux = fp->aux;
+       bool ret = false;
 
        if (fp->kprobe_override)
-               return false;
+               return ret;
 
-       spin_lock(&map->owner.lock);
-       if (!map->owner.type) {
-               /* There's no owner yet where we could check for
-                * compatibility.
-                */
-               map->owner.type  = prog_type;
-               map->owner.jited = fp->jited;
-               map->owner.xdp_has_frags = aux->xdp_has_frags;
-               map->owner.attach_func_proto = aux->attach_func_proto;
+       spin_lock(&map->owner_lock);
+       /* There's no owner yet where we could check for compatibility. */
+       if (!map->owner) {
+               map->owner = bpf_map_owner_alloc(map);
+               if (!map->owner)
+                       goto err;
+               map->owner->type  = prog_type;
+               map->owner->jited = fp->jited;
+               map->owner->xdp_has_frags = aux->xdp_has_frags;
+               map->owner->attach_func_proto = aux->attach_func_proto;
                ret = true;
        } else {
-               ret = map->owner.type  == prog_type &&
-                     map->owner.jited == fp->jited &&
-                     map->owner.xdp_has_frags == aux->xdp_has_frags;
+               ret = map->owner->type  == prog_type &&
+                     map->owner->jited == fp->jited &&
+                     map->owner->xdp_has_frags == aux->xdp_has_frags;
                if (ret &&
-                   map->owner.attach_func_proto != aux->attach_func_proto) {
+                   map->owner->attach_func_proto != aux->attach_func_proto) {
                        switch (prog_type) {
                        case BPF_PROG_TYPE_TRACING:
                        case BPF_PROG_TYPE_LSM:
@@ -2154,8 +2155,8 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
                        }
                }
        }
-       spin_unlock(&map->owner.lock);
-
+err:
+       spin_unlock(&map->owner_lock);
        return ret;
 }
 
index 377bb60b7916430420d0c5c5b35084309e9593d3..c15d243bfe382815d1b7d6f4a7ac1c9322066783 100644 (file)
@@ -631,6 +631,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
        security_bpf_map_free(map);
        kfree(map->off_arr);
        bpf_map_release_memcg(map);
+       bpf_map_owner_free(map);
        /* implementation dependent freeing, map_free callback also does
         * bpf_map_free_kptr_off_tab, if needed.
         */
@@ -738,12 +739,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
        struct bpf_map *map = filp->private_data;
        u32 type = 0, jited = 0;
 
-       if (map_type_contains_progs(map)) {
-               spin_lock(&map->owner.lock);
-               type  = map->owner.type;
-               jited = map->owner.jited;
-               spin_unlock(&map->owner.lock);
+       spin_lock(&map->owner_lock);
+       if (map->owner) {
+               type  = map->owner->type;
+               jited = map->owner->jited;
        }
+       spin_unlock(&map->owner_lock);
 
        seq_printf(m,
                   "map_type:\t%u\n"
@@ -1161,7 +1162,7 @@ static int map_create(union bpf_attr *attr)
        atomic64_set(&map->refcnt, 1);
        atomic64_set(&map->usercnt, 1);
        mutex_init(&map->freeze_mutex);
-       spin_lock_init(&map->owner.lock);
+       spin_lock_init(&map->owner_lock);
 
        map->spin_lock_off = -EINVAL;
        map->timer_off = -EINVAL;