bpf: Use kmalloc_nolock() universally in local storage
author    Amery Hung <ameryhung@gmail.com>
          Sat, 11 Apr 2026 01:54:17 +0000 (18:54 -0700)
committer Alexei Starovoitov <ast@kernel.org>
          Sat, 11 Apr 2026 04:22:32 +0000 (21:22 -0700)
Switch to kmalloc_nolock() universally in local storage. Socket local
storage did not move to kmalloc_nolock() when the BPF memory allocator
was replaced by it, for performance reasons. Now that kfree_rcu()
supports freeing memory allocated by kmalloc_nolock(), we can move the
remaining local storages to kmalloc_nolock() and clean up the cluttered
free paths.
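
As an illustration (not part of the patch), the allocation/free pattern
this change relies on looks roughly like the following, with a
hypothetical struct foo standing in for the local storage objects:

  struct foo {
          struct rcu_head rcu;
          int payload;
  };

  static struct foo *foo_alloc(void)
  {
          /* kmalloc_nolock() does not spin on internal locks, so it
           * is usable in contexts where sleeping or spinning is not
           * allowed.
           */
          return kmalloc_nolock(sizeof(struct foo), __GFP_ZERO,
                                NUMA_NO_NODE);
  }

  static void foo_free(struct foo *f)
  {
          /* kfree_rcu() can now free kmalloc_nolock() memory
           * directly, so no dedicated call_rcu() callback invoking
           * kfree_nolock() is needed on this path.
           */
          kfree_rcu(f, rcu);
  }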

Use kfree() instead of kfree_nolock() in bpf_selem_free_trace_rcu() and
bpf_local_storage_free_trace_rcu(). Both callbacks run in process
context, where spinning is allowed, so kfree_nolock() is unnecessary.
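
For reference, the simplified callback shape after this change looks
roughly like this (hypothetical struct bar, not the exact kernel code):

  struct bar {
          struct rcu_head rcu;
  };

  static void bar_free_trace_rcu(struct rcu_head *rcu)
  {
          struct bar *b = container_of(rcu, struct bar, rcu);

          /* Runs in process context, where spinning is allowed, so
           * plain kfree() suffices. An RCU Tasks Trace grace period
           * also implies an RCU grace period, so freeing here is safe
           * for both kinds of readers.
           */
          kfree(b);
  }

Such a callback would be queued with call_rcu_tasks_trace(&b->rcu,
bar_free_trace_rcu) when the object cannot be freed immediately.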

Benchmark:

./bench -p 1 local-storage-create --storage-type socket \
  --batch-size {16,32,64}

The benchmark is a microbenchmark stress-testing how fast local storage
can be created. There is no measurable throughput change for socket local
storage after switching from kzalloc() to kmalloc_nolock().

Socket local storage

                 batch    creation speed      diff
---------------  -----    ---------------     -----
Baseline           16     433.9 ± 0.6 k/s
                   32     434.3 ± 1.4 k/s
                   64     434.2 ± 0.7 k/s

After              16     439.0 ± 1.9 k/s     +1.2%
                   32     437.3 ± 2.0 k/s     +0.7%
                   64     435.8 ± 2.5 k/s     +0.4%

It is also worth noting that the baseline recently got a 5% throughput
boost when sheaves replaced the percpu partial slabs [0].

[0] https://lore.kernel.org/bpf/20260123-sheaves-for-all-v4-0-041323d506f7@suse.cz/

Signed-off-by: Amery Hung <ameryhung@gmail.com>
Link: https://lore.kernel.org/r/20260411015419.114016-3-ameryhung@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_local_storage.h
kernel/bpf/bpf_cgrp_storage.c
kernel/bpf/bpf_inode_storage.c
kernel/bpf/bpf_local_storage.c
kernel/bpf/bpf_task_storage.c
net/core/bpf_sk_storage.c

diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 8157e8da61d40cb8cf218e3fccc2b052ed26928f..dced54e9265fc6418fbb02d32cbaacbef0cfb3fd 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -54,7 +54,6 @@ struct bpf_local_storage_map {
        u32 bucket_log;
        u16 elem_size;
        u16 cache_idx;
-       bool use_kmalloc_nolock;
 };
 
 struct bpf_local_storage_data {
@@ -86,8 +85,7 @@ struct bpf_local_storage_elem {
                                                 */
        };
        atomic_t state;
-       bool use_kmalloc_nolock;
-       /* 3 bytes hole */
+       /* 4 bytes hole */
        /* The data is stored in another cacheline to minimize
         * the number of cachelines access during a cache hit.
         */
@@ -104,7 +102,6 @@ struct bpf_local_storage {
        rqspinlock_t lock;      /* Protect adding/removing from the "list" */
        u64 mem_charge;         /* Copy of mem charged to owner. Protected by "lock" */
        refcount_t owner_refcnt;/* Used to pin owner when map_free is uncharging */
-       bool use_kmalloc_nolock;
 };
 
 /* U16_MAX is much more than enough for sk local storage
@@ -137,8 +134,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
 
 struct bpf_map *
 bpf_local_storage_map_alloc(union bpf_attr *attr,
-                           struct bpf_local_storage_cache *cache,
-                           bool use_kmalloc_nolock);
+                           struct bpf_local_storage_cache *cache);
 
 void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
                                      struct bpf_local_storage_map *smap,
diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
index c2a2ead1f466dc9ccd17589297d0396d7f030507..d93ac28667487fcc641f29ab1d208f6d7cca8cb1 100644
--- a/kernel/bpf/bpf_cgrp_storage.c
+++ b/kernel/bpf/bpf_cgrp_storage.c
@@ -114,7 +114,7 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
 
 static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 {
-       return bpf_local_storage_map_alloc(attr, &cgroup_cache, true);
+       return bpf_local_storage_map_alloc(attr, &cgroup_cache);
 }
 
 static void cgroup_storage_map_free(struct bpf_map *map)
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index e86734609f3d26810c80d2e3ac3a41749e181c4c..efc8996a4c0ab1725eb8623d42da0b2744678fdb 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -179,7 +179,7 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key,
 
 static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr)
 {
-       return bpf_local_storage_map_alloc(attr, &inode_cache, false);
+       return bpf_local_storage_map_alloc(attr, &inode_cache);
 }
 
 static void inode_storage_map_free(struct bpf_map *map)
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index cad17ca8552f8d659606ef4833f8eee39d10198b..bc687b9d25a954591607c8cbbbc1f415a173dcdc 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -75,18 +75,12 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
        if (mem_charge(smap, owner, smap->elem_size))
                return NULL;
 
-       if (smap->use_kmalloc_nolock) {
-               selem = bpf_map_kmalloc_nolock(&smap->map, smap->elem_size,
-                                              __GFP_ZERO, NUMA_NO_NODE);
-       } else {
-               selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
-                                       gfp_flags | __GFP_NOWARN);
-       }
+       selem = bpf_map_kmalloc_nolock(&smap->map, smap->elem_size,
+                                      __GFP_ZERO, NUMA_NO_NODE);
 
        if (selem) {
                RCU_INIT_POINTER(SDATA(selem)->smap, smap);
                atomic_set(&selem->state, 0);
-               selem->use_kmalloc_nolock = smap->use_kmalloc_nolock;
 
                if (value) {
                        /* No need to call check_and_init_map_value as memory is zero init */
@@ -102,8 +96,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
        return NULL;
 }
 
-/* rcu tasks trace callback for use_kmalloc_nolock == false */
-static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
+static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
 {
        struct bpf_local_storage *local_storage;
 
@@ -115,47 +108,14 @@ static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
        kfree(local_storage);
 }
 
-/* Handle use_kmalloc_nolock == false */
-static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
-                                    bool vanilla_rcu)
-{
-       if (vanilla_rcu)
-               kfree_rcu(local_storage, rcu);
-       else
-               call_rcu_tasks_trace(&local_storage->rcu,
-                                    __bpf_local_storage_free_trace_rcu);
-}
-
-static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
-{
-       struct bpf_local_storage *local_storage;
-
-       local_storage = container_of(rcu, struct bpf_local_storage, rcu);
-       kfree_nolock(local_storage);
-}
-
-static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
-{
-       /*
-        * RCU Tasks Trace grace period implies RCU grace period, do
-        * kfree() directly.
-        */
-       bpf_local_storage_free_rcu(rcu);
-}
-
 static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
                                   bool reuse_now)
 {
        if (!local_storage)
                return;
 
-       if (!local_storage->use_kmalloc_nolock) {
-               __bpf_local_storage_free(local_storage, reuse_now);
-               return;
-       }
-
        if (reuse_now) {
-               call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
+               kfree_rcu(local_storage, rcu);
                return;
        }
 
@@ -163,42 +123,7 @@ static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
                             bpf_local_storage_free_trace_rcu);
 }
 
-/* rcu callback for use_kmalloc_nolock == false */
-static void __bpf_selem_free_rcu(struct rcu_head *rcu)
-{
-       struct bpf_local_storage_elem *selem;
-       struct bpf_local_storage_map *smap;
-
-       selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
-       /* bpf_selem_unlink_nofail may have already cleared smap and freed fields. */
-       smap = rcu_dereference_check(SDATA(selem)->smap, 1);
-
-       if (smap)
-               bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
-       kfree(selem);
-}
-
-/* rcu tasks trace callback for use_kmalloc_nolock == false */
-static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
-{
-       /*
-        * RCU Tasks Trace grace period implies RCU grace period, do
-        * kfree() directly.
-        */
-       __bpf_selem_free_rcu(rcu);
-}
-
-/* Handle use_kmalloc_nolock == false */
-static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
-                            bool vanilla_rcu)
-{
-       if (vanilla_rcu)
-               call_rcu(&selem->rcu, __bpf_selem_free_rcu);
-       else
-               call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
-}
-
-static void bpf_selem_free_rcu(struct rcu_head *rcu)
+static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
 {
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map *smap;
@@ -209,37 +134,24 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
 
        if (smap)
                bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
-       kfree_nolock(selem);
-}
-
-static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
-{
        /*
         * RCU Tasks Trace grace period implies RCU grace period, do
         * kfree() directly.
         */
-       bpf_selem_free_rcu(rcu);
+       kfree(selem);
 }
 
 void bpf_selem_free(struct bpf_local_storage_elem *selem,
                    bool reuse_now)
 {
-       if (!selem->use_kmalloc_nolock) {
-               /*
-                * No uptr will be unpin even when reuse_now == false since uptr
-                * is only supported in task local storage, where
-                * smap->use_kmalloc_nolock == true.
-                */
-               __bpf_selem_free(selem, reuse_now);
-               return;
-       }
+       struct bpf_local_storage_map *smap;
+
+       smap = rcu_dereference_check(SDATA(selem)->smap, 1);
 
        if (reuse_now) {
-               /*
-                * While it is okay to call bpf_obj_free_fields() that unpins uptr when
-                * reuse_now == true, keep it in bpf_selem_free_rcu() for simplicity.
-                */
-               call_rcu(&selem->rcu, bpf_selem_free_rcu);
+               if (smap)
+                       bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
+               kfree_rcu(selem, rcu);
                return;
        }
 
@@ -576,12 +488,8 @@ int bpf_local_storage_alloc(void *owner,
        if (err)
                return err;
 
-       if (smap->use_kmalloc_nolock)
-               storage = bpf_map_kmalloc_nolock(&smap->map, sizeof(*storage),
-                                                __GFP_ZERO, NUMA_NO_NODE);
-       else
-               storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
-                                         gfp_flags | __GFP_NOWARN);
+       storage = bpf_map_kmalloc_nolock(&smap->map, sizeof(*storage),
+                                        __GFP_ZERO, NUMA_NO_NODE);
        if (!storage) {
                err = -ENOMEM;
                goto uncharge;
@@ -591,7 +499,6 @@ int bpf_local_storage_alloc(void *owner,
        raw_res_spin_lock_init(&storage->lock);
        storage->owner = owner;
        storage->mem_charge = sizeof(*storage);
-       storage->use_kmalloc_nolock = smap->use_kmalloc_nolock;
        refcount_set(&storage->owner_refcnt, 1);
 
        bpf_selem_link_storage_nolock(storage, first_selem);
@@ -868,8 +775,7 @@ u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
 
 struct bpf_map *
 bpf_local_storage_map_alloc(union bpf_attr *attr,
-                           struct bpf_local_storage_cache *cache,
-                           bool use_kmalloc_nolock)
+                           struct bpf_local_storage_cache *cache)
 {
        struct bpf_local_storage_map *smap;
        unsigned int i;
@@ -901,12 +807,6 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
        smap->elem_size = offsetof(struct bpf_local_storage_elem,
                                   sdata.data[attr->value_size]);
 
-       /* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in non
-        * preemptible context. Thus, enforce all storages to use
-        * kmalloc_nolock() when CONFIG_PREEMPT_RT is enabled.
-        */
-       smap->use_kmalloc_nolock = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : use_kmalloc_nolock;
-
        smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
        return &smap->map;
 
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 605506792b5b40a46c0f828d9e2d539db16f1ca5..55f4f22bb212896f166edd250b23349a23bf1e8d 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -212,7 +212,7 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
 
 static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
 {
-       return bpf_local_storage_map_alloc(attr, &task_cache, true);
+       return bpf_local_storage_map_alloc(attr, &task_cache);
 }
 
 static void task_storage_map_free(struct bpf_map *map)
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index f8338acebf077bf2f87254f377df4d057aa70e2d..9fb22e352beb6a528993f7ee43a43e6bc756ce47 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -68,7 +68,7 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)
 
 static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 {
-       return bpf_local_storage_map_alloc(attr, &sk_cache, false);
+       return bpf_local_storage_map_alloc(attr, &sk_cache);
 }
 
 static int notsupp_get_next_key(struct bpf_map *map, void *key,