bpf: Remove leftover accounting in htab_map_mem_usage after rqspinlock
author     Daniel Borkmann <daniel@iogearbox.net>
           Tue, 20 Jan 2026 12:55:01 +0000 (13:55 +0100)
committer  Andrii Nakryiko <andrii@kernel.org>
           Tue, 20 Jan 2026 19:28:02 +0000 (11:28 -0800)
After commit 4fa8d68aa53e ("bpf: Convert hashtab.c to rqspinlock"),
HASHTAB_MAP_LOCK_{COUNT,MASK} are no longer used, as the per-CPU
map_locked[HASHTAB_MAP_LOCK_COUNT] array was removed from struct
bpf_htab. However, htab_map_mem_usage() still accounts for the
array's memory. Remove the leftover accounting along with the
now-unused defines.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/09703eb6bb249f12b1d5253b5a50a0c4fa239d27.1768913513.git.daniel@iogearbox.net
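
To make the leftover concrete: the stale term charged
sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT bytes to every
hash map, one int per lock slot per possible CPU. A minimal user-space
sketch of that term (not kernel code; num_possible_cpus() is stubbed with
an assumed 64-CPU machine):

#include <stdio.h>

#define HASHTAB_MAP_LOCK_COUNT 8	/* the define this patch removes */

/* Stub for illustration: assume 64 possible CPUs. */
static unsigned int num_possible_cpus(void) { return 64; }

int main(void)
{
	/* Bytes the removed term added to every hash map's reported
	 * usage: one int per lock slot per possible CPU. */
	unsigned long stale = sizeof(int) * num_possible_cpus() *
			      HASHTAB_MAP_LOCK_COUNT;

	printf("over-reported bytes per htab: %lu\n", stale); /* 2048 */
	return 0;
}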
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 441ff5bc54ac2b863879dce8adf56f66ac7d29f6..3b9d297a53bee5b3d1c6001ed12b55b4b1a6b6d1 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -82,9 +82,6 @@ struct bucket {
        rqspinlock_t raw_lock;
 };
 
-#define HASHTAB_MAP_LOCK_COUNT 8
-#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
-
 struct bpf_htab {
        struct bpf_map map;
        struct bpf_mem_alloc ma;
@@ -2237,11 +2234,11 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
        bool prealloc = htab_is_prealloc(htab);
        bool percpu = htab_is_percpu(htab);
        bool lru = htab_is_lru(htab);
-       u64 num_entries;
-       u64 usage = sizeof(struct bpf_htab);
+       u64 num_entries, usage;
+
+       usage = sizeof(struct bpf_htab) +
+               sizeof(struct bucket) * htab->n_buckets;
 
-       usage += sizeof(struct bucket) * htab->n_buckets;
-       usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
        if (prealloc) {
                num_entries = map->max_entries;
                if (htab_has_extra_elems(htab))
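
The change is user-visible through a map's fdinfo, whose memlock value is
derived from the map's mem_usage callback, htab_map_mem_usage() for hash
maps. A rough sketch for inspecting it (assumes libbpf >= 0.7 for
bpf_map_create(), CAP_BPF or root, and an arbitrary map name; error
handling trimmed; build with cc demo.c -lbpf):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	/* Create a small hash map; key/value sizes are arbitrary. */
	int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "memlock_demo",
				sizeof(int), sizeof(long), 1024, NULL);
	char path[64], line[256];
	FILE *f;

	if (fd < 0)
		return 1;

	/* The memlock line below is computed from htab_map_mem_usage(). */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "memlock:", 8))
				fputs(line, stdout);
		fclose(f);
	}
	close(fd);
	return 0;
}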