git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: Add lookup_and_delete_elem for BPF_MAP_STACK_TRACE
author: Tao Chen <chen.dylane@linux.dev>
Thu, 25 Sep 2025 17:50:28 +0000 (01:50 +0800)
committer: Andrii Nakryiko <andrii@kernel.org>
Thu, 25 Sep 2025 23:12:14 +0000 (16:12 -0700)
The stacktrace map can easily become full, which leads to failures when
obtaining the stack. Besides increasing the size of the map, another
solution is to delete the stack_id after the user looks it up, so extend
the existing bpf_map_lookup_and_delete_elem() functionality to the
stacktrace map type.

Signed-off-by: Tao Chen <chen.dylane@linux.dev>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20250925175030.1615837-1-chen.dylane@linux.dev
include/linux/bpf.h
kernel/bpf/stackmap.c
kernel/bpf/syscall.c

index ea2ed6771cc60c271783ee68502af07b5eaf400b..6338e54a9b1f8dbd91d6fc96c93c023bbca67957 100644 (file)
@@ -2724,7 +2724,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 flags);
 
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
 
 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags);
index 3615c06b7dfa987a5055946c0d6931def5103e3b..2e182a3ac4ce542707212fc0b86cfa22422f206f 100644 (file)
@@ -646,7 +646,15 @@ static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 }
 
 /* Called from syscall */
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
+static int stack_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
+                                           void *value, u64 flags)
+{
+       return bpf_stackmap_extract(map, key, value, true);
+}
+
+/* Called from syscall */
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value,
+                        bool delete)
 {
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *old_bucket;
@@ -663,7 +671,10 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
        memcpy(value, bucket->data, trace_len);
        memset(value + trace_len, 0, map->value_size - trace_len);
 
-       old_bucket = xchg(&smap->buckets[id], bucket);
+       if (delete)
+               old_bucket = bucket;
+       else
+               old_bucket = xchg(&smap->buckets[id], bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
@@ -754,6 +765,7 @@ const struct bpf_map_ops stack_trace_map_ops = {
        .map_free = stack_map_free,
        .map_get_next_key = stack_map_get_next_key,
        .map_lookup_elem = stack_map_lookup_elem,
+       .map_lookup_and_delete_elem = stack_map_lookup_and_delete_elem,
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
        .map_check_btf = map_check_no_btf,
index adb05d235011ffcb0978a1476520c10da06e7b49..a48fa86f82a7fc42a1c36619a5cb6009dc0cbfe1 100644 (file)
@@ -320,7 +320,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
-               err = bpf_stackmap_copy(map, key, value);
+               err = bpf_stackmap_extract(map, key, value, false);
        } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
                err = bpf_fd_array_map_lookup_elem(map, key, value);
        } else if (IS_FD_HASH(map)) {
@@ -1666,7 +1666,8 @@ struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
 }
 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
 
-int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
+int __weak bpf_stackmap_extract(struct bpf_map *map, void *key, void *value,
+                               bool delete)
 {
        return -ENOTSUPP;
 }
@@ -2197,7 +2198,8 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
        } else if (map->map_type == BPF_MAP_TYPE_HASH ||
                   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
                   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
-                  map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+                  map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+                  map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                if (!bpf_map_is_offloaded(map)) {
                        bpf_disable_instrumentation();
                        rcu_read_lock();