To prepare for changing bpf_local_storage_map_bucket::lock to rqspinlock,
convert bpf_selem_link_map() to a failable function. It still always
succeeds and returns 0 until that conversion happens. No functional
change.
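
Once b->lock becomes an rqspinlock, the acquisition itself can fail, and
the new int return value is where that error will be propagated. A rough
sketch of the anticipated follow-up (assuming the rqspinlock_t
raw_res_spin_lock_irqsave()/raw_res_spin_unlock_irqrestore() API; the
select_bucket() call and the exact error codes are illustrative and not
part of this patch):

  int bpf_selem_link_map(struct bpf_local_storage_map *smap,
                         struct bpf_local_storage *local_storage,
                         struct bpf_local_storage_elem *selem)
  {
          struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
          unsigned long flags;
          int ret;

          /* Unlike raw_spin_lock_irqsave(), an rqspinlock acquisition
           * can fail (e.g. -EBUSY or -EDEADLK), so propagate the error
           * to callers instead of linking the selem.
           */
          ret = raw_res_spin_lock_irqsave(&b->lock, flags);
          if (ret)
                  return ret;
          hlist_add_head_rcu(&selem->map_node, &b->list);
          raw_res_spin_unlock_irqrestore(&b->lock, flags);

          return 0;
  }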
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20260205222916.1788211-4-ameryhung@gmail.com
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
-void bpf_selem_link_map(struct bpf_local_storage_map *smap,
- struct bpf_local_storage *local_storage,
- struct bpf_local_storage_elem *selem);
+int bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
hlist_del_init_rcu(&selem->map_node);
}
-void bpf_selem_link_map(struct bpf_local_storage_map *smap,
- struct bpf_local_storage *local_storage,
- struct bpf_local_storage_elem *selem)
+int bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem)
{
struct bpf_local_storage_map_bucket *b;
unsigned long flags;
raw_spin_lock_irqsave(&b->lock, flags);
hlist_add_head_rcu(&selem->map_node, &b->list);
raw_spin_unlock_irqrestore(&b->lock, flags);
+
+ return 0;
}
static void bpf_selem_link_map_nolock(struct bpf_local_storage_map_bucket *b,
}
if (new_sk_storage) {
- bpf_selem_link_map(smap, new_sk_storage, copy_selem);
+ ret = bpf_selem_link_map(smap, new_sk_storage, copy_selem);
+ if (ret) {
+ bpf_selem_free(copy_selem, true);
+ atomic_sub(smap->elem_size,
+ &newsk->sk_omem_alloc);
+ bpf_map_put(map);
+ goto out;
+ }
bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
} else {
ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);