bpf: Add BPF_F_CPU and BPF_F_ALL_CPUS flags support for percpu_cgroup_storage maps
author    Leon Hwang <leon.hwang@linux.dev>
          Wed, 7 Jan 2026 02:20:20 +0000 (10:20 +0800)
committer Alexei Starovoitov <ast@kernel.org>
          Wed, 7 Jan 2026 04:48:32 +0000 (20:48 -0800)
Introduce BPF_F_ALL_CPUS flag support for percpu_cgroup_storage maps to
allow updating the values of all CPUs from a single value via the
update_elem API.

Introduce BPF_F_CPU flag support for percpu_cgroup_storage maps to
allow:

* updating the value for a specified CPU via the update_elem API.
* looking up the value for a specified CPU via the lookup_elem API.

The BPF_F_CPU flag is passed via map_flags, with the target CPU embedded
in the upper 32 bits (see the usage sketch below).
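
For illustration only (not part of this patch): a minimal userspace
sketch of how the new flags are intended to be used, assuming a map fd
and key prepared elsewhere and that BPF_F_CPU/BPF_F_ALL_CPUS are picked
up from the updated uapi header; the helper names below are made up for
the example. With either flag set, 'value' points at a single value
rather than one value per possible CPU.

  #include <bpf/bpf.h>     /* bpf_map_update_elem(), bpf_map_lookup_elem_flags() */
  #include <linux/bpf.h>   /* BPF_F_CPU, BPF_F_ALL_CPUS */

  /* Update only @cpu's copy; the CPU index rides in the upper 32 bits
   * of map_flags, matching cpu = map_flags >> 32 on the kernel side.
   */
  static int update_one_cpu(int map_fd, const void *key,
                            const void *value, __u32 cpu)
  {
          return bpf_map_update_elem(map_fd, key, value,
                                     BPF_F_CPU | ((__u64)cpu << 32));
  }

  /* Replicate one value to every possible CPU in a single call. */
  static int update_all_cpus(int map_fd, const void *key, const void *value)
  {
          return bpf_map_update_elem(map_fd, key, value, BPF_F_ALL_CPUS);
  }

  /* Read back only @cpu's copy instead of one value per possible CPU. */
  static int lookup_one_cpu(int map_fd, const void *key,
                            void *value, __u32 cpu)
  {
          return bpf_map_lookup_elem_flags(map_fd, key, value,
                                           BPF_F_CPU | ((__u64)cpu << 32));
  }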

Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
Link: https://lore.kernel.org/r/20260107022022.12843-6-leon.hwang@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf-cgroup.h
include/linux/bpf.h
kernel/bpf/local_storage.c
kernel/bpf/syscall.c

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index d1eb5c7729cb8972e82473a9bec46a75d126489a..2f535331f9264fe0d676b7027edce1863c9b5158 100644
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
 
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);
 
@@ -470,7 +470,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
 static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
 static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
-                                                void *value) {
+                                                void *value, u64 flags) {
        return 0;
 }
 static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f5c259dcc4258c30a2aeae68305e30fd7ea8ea1d..5936f8e2996f19e34f8f0d9249108f3835ed42d1 100644
@@ -3921,6 +3921,7 @@ static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
        case BPF_MAP_TYPE_PERCPU_ARRAY:
        case BPF_MAP_TYPE_PERCPU_HASH:
        case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+       case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
                return true;
        default:
                return false;
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 2ab4b60ffe61ffbd6fea3c4f98aa939892b00d50..1ccbf28b2ad9f1b200b96fbcdeca17c456fc2c7b 100644
@@ -180,7 +180,7 @@ static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
 }
 
 int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
-                                  void *value)
+                                  void *value, u64 map_flags)
 {
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;
@@ -198,11 +198,17 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               copy_map_value(_map, value, per_cpu_ptr(storage->percpu_buf, cpu));
+               goto unlock;
+       }
        size = round_up(_map->value_size, 8);
        for_each_possible_cpu(cpu) {
                copy_map_value_long(_map, value + off, per_cpu_ptr(storage->percpu_buf, cpu));
                off += size;
        }
+unlock:
        rcu_read_unlock();
        return 0;
 }
@@ -212,10 +218,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 {
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;
-       int cpu, off = 0;
+       void *val;
        u32 size;
+       int cpu;
 
-       if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
+       if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
                return -EINVAL;
 
        rcu_read_lock();
@@ -231,11 +238,17 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
         * returned or zeros which were zero-filled by percpu_alloc,
         * so no kernel data leaks possible
         */
+       if (map_flags & BPF_F_CPU) {
+               cpu = map_flags >> 32;
+               copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), value);
+               goto unlock;
+       }
        size = round_up(_map->value_size, 8);
        for_each_possible_cpu(cpu) {
-               copy_map_value_long(_map, per_cpu_ptr(storage->percpu_buf, cpu), value + off);
-               off += size;
+               val = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
+               copy_map_value(_map, per_cpu_ptr(storage->percpu_buf, cpu), val);
        }
+unlock:
        rcu_read_unlock();
        return 0;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5e3b5d828856730ebf6c1b34eb1c25a13d329d9c..ecc0929ce462930a5357daa87d4a22a4390612a6 100644
@@ -320,7 +320,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
-               err = bpf_percpu_cgroup_storage_copy(map, key, value);
+               err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_extract(map, key, value, false);
        } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {