bpf: Consistently use bpf_rcu_lock_held() everywhere
author    Andrii Nakryiko <andrii@kernel.org>
          Tue, 14 Oct 2025 20:14:03 +0000 (13:14 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
          Wed, 15 Oct 2025 10:26:12 +0000 (12:26 +0200)
We have many places that open-code what is now the bpf_rcu_lock_held()
macro, so replace all of them with a clean and short macro invocation.
To make that possible, move the bpf_rcu_lock_held() macro into
include/linux/bpf.h.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20251014201403.4104511-1-andrii@kernel.org
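
For illustration only (not part of this commit), a minimal sketch of how a
caller might use the now-shared macro once it lives in include/linux/bpf.h;
the struct and function below are hypothetical:

	#include <linux/bpf.h>
	#include <linux/rcupdate.h>

	struct demo_elem {
		struct demo_elem __rcu *next;
	};

	/* Hypothetical lookup helper: assert that one of the RCU flavors a BPF
	 * program may run under (normal, trace, or bh) is held before
	 * dereferencing, either explicitly or via rcu_dereference_check().
	 */
	static struct demo_elem *demo_first(struct demo_elem __rcu **head)
	{
		WARN_ON_ONCE(!bpf_rcu_lock_held());

		return rcu_dereference_check(*head, bpf_rcu_lock_held());
	}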
include/linux/bpf.h
include/linux/bpf_local_storage.h
kernel/bpf/hashtab.c
kernel/bpf/helpers.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f87fb203aaaef92982daf206c9f5c714a6f0473b..86afd9ac6848c5355a763fac219a25b650383e46 100644
@@ -2381,6 +2381,9 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
 bool bpf_jit_bypass_spec_v1(void);
 bool bpf_jit_bypass_spec_v4(void);
 
+#define bpf_rcu_lock_held() \
+       (rcu_read_lock_held() || rcu_read_lock_trace_held() || rcu_read_lock_bh_held())
+
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 extern struct mutex bpf_stats_enabled_mutex;
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index ab7244d8108f60474f740220f274bbfd50f6bcf5..782f58feea356514a8420070cabf3d213a19a3c1 100644
@@ -18,9 +18,6 @@
 
 #define BPF_LOCAL_STORAGE_CACHE_SIZE   16
 
-#define bpf_rcu_lock_held()                                                    \
-       (rcu_read_lock_held() || rcu_read_lock_trace_held() ||                 \
-        rcu_read_lock_bh_held())
 struct bpf_local_storage_map_bucket {
        struct hlist_head list;
        raw_spinlock_t lock;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index e7a6ba04dc82ea209d6e3b79448bc9b3d7557266..f876f09355f0d35d572ed18d3b2ce2e36d57fdb1 100644
@@ -657,8 +657,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
        struct htab_elem *l;
        u32 hash, key_size;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
@@ -1086,8 +1085,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
@@ -1194,8 +1192,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
@@ -1263,8 +1260,7 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
@@ -1326,8 +1322,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
                /* unknown flags */
                return -EINVAL;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
@@ -1404,8 +1399,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
        u32 hash, key_size;
        int ret;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
@@ -1440,8 +1434,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
        u32 hash, key_size;
        int ret;
 
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
 
        key_size = map->key_size;
 
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index dea8443f782c545429e1a8c6c26590fcd445721f..825280c953be95161810c30e8a7bcdd4c4d8efe3 100644
@@ -42,8 +42,7 @@
  */
 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
        return (unsigned long) map->ops->map_lookup_elem(map, key);
 }
 
@@ -59,8 +58,7 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
           void *, value, u64, flags)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
        return map->ops->map_update_elem(map, key, value, flags);
 }
 
@@ -77,8 +75,7 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
 
 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
        return map->ops->map_delete_elem(map, key);
 }
 
@@ -134,8 +131,7 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
 
 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
 {
-       WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
-                    !rcu_read_lock_bh_held());
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
        return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
 }