bpf: Use copy_map_value_locked() in alloc_htab_elem() for BPF_F_LOCK
author Mykyta Yatsenko <yatsenko@meta.com>
Wed, 1 Apr 2026 13:50:36 +0000 (06:50 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Mon, 6 Apr 2026 01:37:32 +0000 (18:37 -0700)
When a BPF_F_LOCK update races with a concurrent delete, the freed
element can be immediately recycled by alloc_htab_elem(). The fast path
in htab_map_update_elem() performs a lockless lookup and then calls
copy_map_value_locked() under the element's spin_lock. If
alloc_htab_elem() recycles the same memory, it overwrites the value
with plain copy_map_value(), without taking the spin_lock, causing
torn writes.
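
For context, BPF_F_LOCK only applies to maps whose value embeds a
struct bpf_spin_lock. A minimal user-space sketch of the kind of update
that reaches this path (assumes libbpf; the map layout and names are
illustrative, not taken from the reproducer in the report):

  /* Sketch only: a hash map value that embeds a struct bpf_spin_lock.
   * Names and layout are illustrative.
   */
  #include <bpf/bpf.h>
  #include <linux/bpf.h>

  struct val {
          struct bpf_spin_lock lock;      /* lock used by BPF_F_LOCK */
          long counter;
  };

  static int update_locked(int map_fd, int key, long counter)
  {
          struct val v = { .counter = counter };

          /* The kernel copies the value while holding the destination
           * element's embedded spin_lock.  A concurrent delete can free
           * that element and hand the memory back out through
           * alloc_htab_elem(); before this fix the recycled element was
           * filled with a plain copy_map_value(), so a holder of the
           * stale lock could observe a torn value.
           */
          return bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
  }

On the read side, libbpf's bpf_map_lookup_elem_flags() accepts the same
flag so the value is copied out under the lock.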

Use copy_map_value_locked() when BPF_F_LOCK is set so the new element's
value is written under the embedded spin_lock, serializing against any
stale lock holders.
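
The lock holders in question are whoever took the value's embedded
spin_lock: the BPF_F_LOCK syscall path itself, or a BPF program using
bpf_spin_lock() on the same value. For reference, a BPF-program-side
critical section on such a value looks like this (illustrative sketch,
assuming libbpf's bpf_helpers.h; not taken from the report):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct val {
          struct bpf_spin_lock lock;
          long counter;
  };

  struct {
          __uint(type, BPF_MAP_TYPE_HASH);
          __uint(max_entries, 1);
          __type(key, int);
          __type(value, struct val);
  } vals SEC(".maps");

  SEC("tracepoint/syscalls/sys_enter_getpid")
  int bump_counter(void *ctx)
  {
          int key = 0;
          struct val *v;

          v = bpf_map_lookup_elem(&vals, &key);
          if (!v)
                  return 0;

          bpf_spin_lock(&v->lock);
          v->counter++;   /* protected by the same lock BPF_F_LOCK uses */
          bpf_spin_unlock(&v->lock);
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";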

Fixes: 96049f3afd50 ("bpf: introduce BPF_F_LOCK flag")
Reported-by: Aaron Esau <aaron1esau@gmail.com>
Closes: https://lore.kernel.org/all/CADucPGRvSRpkneb94dPP08YkOHgNgBnskTK6myUag_Mkjimihg@mail.gmail.com/
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Link: https://lore.kernel.org/r/20260401-bpf_map_torn_writes-v1-1-782d071c55e7@meta.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/hashtab.c

index bc6bc8bb871d412443c0393ac2cf475b1ccef93e..f7ac1ec7be8bf84c756ad57b6a76c38ea684869e 100644
@@ -1138,6 +1138,10 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
        } else if (fd_htab_map_needs_adjust(htab)) {
                size = round_up(size, 8);
                memcpy(htab_elem_value(l_new, key_size), value, size);
+       } else if (map_flags & BPF_F_LOCK) {
+               copy_map_value_locked(&htab->map,
+                                     htab_elem_value(l_new, key_size),
+                                     value, false);
        } else {
                copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value);
        }