static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *selem)
+ struct bpf_local_storage *local_storage)
{
- return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
+ return &smap->buckets[hash_ptr(local_storage, smap->bucket_log)];
}
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
/*
 * Unlink @selem from the bucket list of the map it belongs to.
 *
 * Fast path: if the selem is already unlinked, return without taking the
 * bucket lock.  Otherwise derive the bucket from the owning local_storage
 * (both pointers read under RCU protection, see bpf_rcu_lock_held()) and
 * delete the selem under b->lock.  The linked state is re-checked under
 * the lock because a concurrent unlink may have won the race between the
 * lockless check and lock acquisition.
 */
static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage *local_storage;
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, local_storage);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem)
{
- struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
+ struct bpf_local_storage_map_bucket *b;
unsigned long flags;
+ b = select_bucket(smap, local_storage);
raw_spin_lock_irqsave(&b->lock, flags);
hlist_add_head_rcu(&selem->map_node, &b->list);
raw_spin_unlock_irqrestore(&b->lock, flags);
storage->use_kmalloc_nolock = smap->use_kmalloc_nolock;
bpf_selem_link_storage_nolock(storage, first_selem);
- bpf_selem_link_map(smap, first_selem);
+ bpf_selem_link_map(smap, storage, first_selem);
owner_storage_ptr =
(struct bpf_local_storage **)owner_storage(smap, owner);
alloc_selem = NULL;
/* First, link the new selem to the map */
- bpf_selem_link_map(smap, selem);
+ bpf_selem_link_map(smap, local_storage, selem);
/* Second, link (and publish) the new selem to local_storage */
bpf_selem_link_storage_nolock(local_storage, selem);