From: Daniel Borkmann
Date: Tue, 20 Jan 2026 12:55:01 +0000 (+0100)
Subject: bpf: Remove leftover accounting in htab_map_mem_usage after rqspinlock
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=713edc71449f122491f8860be49b40f27d5f46b5;p=thirdparty%2Fkernel%2Flinux.git

bpf: Remove leftover accounting in htab_map_mem_usage after rqspinlock

After commit 4fa8d68aa53e ("bpf: Convert hashtab.c to rqspinlock") we no
longer use HASHTAB_MAP_LOCK_{COUNT,MASK}, as the per-CPU
map_locked[HASHTAB_MAP_LOCK_COUNT] array was removed from struct
bpf_htab. However, it is still accounted for in htab_map_mem_usage.

Signed-off-by: Daniel Borkmann
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/09703eb6bb249f12b1d5253b5a50a0c4fa239d27.1768913513.git.daniel@iogearbox.net
---

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 441ff5bc54ac2..3b9d297a53bee 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -82,9 +82,6 @@ struct bucket {
 	rqspinlock_t raw_lock;
 };
 
-#define HASHTAB_MAP_LOCK_COUNT 8
-#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
-
 struct bpf_htab {
 	struct bpf_map map;
 	struct bpf_mem_alloc ma;
@@ -2237,11 +2234,11 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
 	bool prealloc = htab_is_prealloc(htab);
 	bool percpu = htab_is_percpu(htab);
 	bool lru = htab_is_lru(htab);
-	u64 num_entries;
-	u64 usage = sizeof(struct bpf_htab);
+	u64 num_entries, usage;
+
+	usage = sizeof(struct bpf_htab) +
+		sizeof(struct bucket) * htab->n_buckets;
 
-	usage += sizeof(struct bucket) * htab->n_buckets;
-	usage += sizeof(int) * num_possible_cpus() * HASHTAB_MAP_LOCK_COUNT;
 	if (prealloc) {
 		num_entries = map->max_entries;
 		if (htab_has_extra_elems(htab))
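
For context, the removed line accounted sizeof(int) * num_possible_cpus() *
HASHTAB_MAP_LOCK_COUNT bytes for the map_locked[] array that no longer
exists, so htab_map_mem_usage() over-reported each hash map by that amount.
Below is a minimal userspace sketch of that arithmetic; it is not part of
the patch, and the 64 possible CPUs are an assumed example value:

	#include <stdio.h>

	#define HASHTAB_MAP_LOCK_COUNT 8	/* constant removed by this patch */

	int main(void)
	{
		/* assumed example value; the kernel uses num_possible_cpus() */
		unsigned int num_possible_cpus = 64;
		unsigned long long stale = sizeof(int) * num_possible_cpus *
					   HASHTAB_MAP_LOCK_COUNT;

		/* with 4-byte int: 4 * 64 * 8 = 2048 bytes over-reported per map */
		printf("stale map_locked accounting: %llu bytes\n", stale);
		return 0;
	}

On such a configuration the reported usage drops by 2048 bytes per hash map
after this patch; the underlying allocation itself was already gone since
the rqspinlock conversion.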