bpf: Remove cgroup local storage percpu counter
author    Amery Hung <ameryhung@gmail.com>
Thu, 5 Feb 2026 22:29:05 +0000 (14:29 -0800)
committer Martin KaFai Lau <martin.lau@kernel.org>
Fri, 6 Feb 2026 22:29:14 +0000 (14:29 -0800)
The percpu counter in cgroup local storage is no longer needed, as the
underlying bpf_local_storage can now handle deadlocks with the help of
rqspinlock. Remove the percpu counter and the related
migrate_{disable,enable}() calls.
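
For reference, a minimal sketch of the mechanism (illustrative only:
the struct and function names below are stand-ins, not the actual
bpf_local_storage code). An rqspinlock acquisition reports AA/ABBA
deadlock back to the caller as an error, so the old per-cpu busy
counter that guarded against re-entrancy on the same CPU becomes
redundant:

  #include <asm-generic/rqspinlock.h>

  struct demo_storage {
          rqspinlock_t lock;      /* was: spinlock + percpu busy flag */
  };

  static int demo_update(struct demo_storage *s)
  {
          unsigned long flags;
          int err;

          /* Re-entering from the same CPU (e.g. a tracing prog firing
           * inside the locked section) makes this return an error such
           * as -EDEADLK instead of spinning forever, so no trylock-style
           * guard is needed around the critical section.
           */
          err = raw_res_spin_lock_irqsave(&s->lock, flags);
          if (err)
                  return err;
          /* ... modify storage ... */
          raw_res_spin_unlock_irqrestore(&s->lock, flags);
          return 0;
  }

With the failure surfaced by the lock itself, helpers like
bpf_cgrp_storage_delete() can simply propagate the error from the
storage layer instead of returning -EBUSY from a trylock.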

Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20260205222916.1788211-8-ameryhung@gmail.com
kernel/bpf/bpf_cgrp_storage.c

index 8fef24fcac682917a75414f03fbc64938a168650..4d84611d822292691b94a8140a421176169fe85b 100644 (file)
 
 DEFINE_BPF_STORAGE_CACHE(cgroup_cache);
 
-static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
-
-static void bpf_cgrp_storage_lock(void)
-{
-       cant_migrate();
-       this_cpu_inc(bpf_cgrp_storage_busy);
-}
-
-static void bpf_cgrp_storage_unlock(void)
-{
-       this_cpu_dec(bpf_cgrp_storage_busy);
-}
-
-static bool bpf_cgrp_storage_trylock(void)
-{
-       cant_migrate();
-       if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
-               this_cpu_dec(bpf_cgrp_storage_busy);
-               return false;
-       }
-       return true;
-}
-
 static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
 {
        struct cgroup *cg = owner;
@@ -45,16 +22,14 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
 {
        struct bpf_local_storage *local_storage;
 
-       rcu_read_lock_dont_migrate();
+       rcu_read_lock();
        local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
        if (!local_storage)
                goto out;
 
-       bpf_cgrp_storage_lock();
        bpf_local_storage_destroy(local_storage);
-       bpf_cgrp_storage_unlock();
 out:
-       rcu_read_unlock_migrate();
+       rcu_read_unlock();
 }
 
 static struct bpf_local_storage_data *
@@ -83,9 +58,7 @@ static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
        if (IS_ERR(cgroup))
                return ERR_CAST(cgroup);
 
-       bpf_cgrp_storage_lock();
        sdata = cgroup_storage_lookup(cgroup, map, true);
-       bpf_cgrp_storage_unlock();
        cgroup_put(cgroup);
        return sdata ? sdata->data : NULL;
 }
@@ -102,10 +75,8 @@ static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
        if (IS_ERR(cgroup))
                return PTR_ERR(cgroup);
 
-       bpf_cgrp_storage_lock();
        sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
                                         value, map_flags, false, GFP_ATOMIC);
-       bpf_cgrp_storage_unlock();
        cgroup_put(cgroup);
        return PTR_ERR_OR_ZERO(sdata);
 }
@@ -131,9 +102,7 @@ static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
        if (IS_ERR(cgroup))
                return PTR_ERR(cgroup);
 
-       bpf_cgrp_storage_lock();
        err = cgroup_storage_delete(cgroup, map);
-       bpf_cgrp_storage_unlock();
        cgroup_put(cgroup);
        return err;
 }
@@ -150,7 +119,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 
 static void cgroup_storage_map_free(struct bpf_map *map)
 {
-       bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
+       bpf_local_storage_map_free(map, &cgroup_cache, NULL);
 }
 
 /* *gfp_flags* is a hidden argument provided by the verifier */
@@ -158,7 +127,6 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
           void *, value, u64, flags, gfp_t, gfp_flags)
 {
        struct bpf_local_storage_data *sdata;
-       bool nobusy;
 
        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
@@ -167,38 +135,27 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
        if (!cgroup)
                return (unsigned long)NULL;
 
-       nobusy = bpf_cgrp_storage_trylock();
-
-       sdata = cgroup_storage_lookup(cgroup, map, nobusy);
+       sdata = cgroup_storage_lookup(cgroup, map, true);
        if (sdata)
-               goto unlock;
+               goto out;
 
        /* only allocate new storage, when the cgroup is refcounted */
        if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
-           (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy)
+           (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
                sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
                                                 value, BPF_NOEXIST, false, gfp_flags);
 
-unlock:
-       if (nobusy)
-               bpf_cgrp_storage_unlock();
+out:
        return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
 }
 
 BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
 {
-       int ret;
-
        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (!cgroup)
                return -EINVAL;
 
-       if (!bpf_cgrp_storage_trylock())
-               return -EBUSY;
-
-       ret = cgroup_storage_delete(cgroup, map);
-       bpf_cgrp_storage_unlock();
-       return ret;
+       return cgroup_storage_delete(cgroup, map);
 }
 
 const struct bpf_map_ops cgrp_storage_map_ops = {