}
/*
- Neighbour hash table buckets are protected with rwlock tbl->lock.
+ Neighbour hash table buckets are protected with tbl->lock.
- All the scans/updates to hash buckets MUST be made under this lock.
- NOTHING clever should be made under this lock: no callbacks
  to protocol backends, no attempts to send something to network.
{
bool on_gc_list, exempt_from_gc;
- write_lock_bh(&n->tbl->lock);
+ spin_lock_bh(&n->tbl->lock);
write_lock(&n->lock);
if (n->dead)
goto out;
}
out:
write_unlock(&n->lock);
- write_unlock_bh(&n->tbl->lock);
+ spin_unlock_bh(&n->tbl->lock);
}
static void neigh_update_managed_list(struct neighbour *n)
{
bool on_managed_list, add_to_managed;
- write_lock_bh(&n->tbl->lock);
+ spin_lock_bh(&n->tbl->lock);
write_lock(&n->lock);
if (n->dead)
goto out;
list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
write_unlock(&n->lock);
- write_unlock_bh(&n->tbl->lock);
+ spin_unlock_bh(&n->tbl->lock);
}
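
Both helpers above (neigh_update_gc_list and neigh_update_managed_list) follow the same nesting rule, in line with the locking comment at the top: the table-wide tbl->lock (a BH-disabling spinlock after this change) is taken first, the per-entry n->lock (still an rwlock) second, and n->dead is re-checked only once both are held. A minimal sketch of that shape, assuming the post-patch lock types; example_touch_entry() is an invented name, not a kernel function:

#include <linux/spinlock.h>
#include <net/neighbour.h>

static void example_touch_entry(struct neighbour *n)
{
	spin_lock_bh(&n->tbl->lock);	/* table lock first, BH disabled */
	write_lock(&n->lock);		/* per-entry rwlock second */

	if (n->dead)			/* entry may already be unlinked */
		goto out;

	/* ... move n on/off tbl->gc_list or tbl->managed_list here ... */
out:
	write_unlock(&n->lock);
	spin_unlock_bh(&n->tbl->lock);
}
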
static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
if (refcount_read(&n->refcnt) == 1) {
WRITE_ONCE(tbl->last_flush, jiffies);
unlock:
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
return shrunk;
}
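
For context, a condensed sketch of the shape neigh_forced_gc() has after the conversion: the gc_list walk and the refcount test happen under the BH-disabled spinlock, and an entry is only reclaimed when the table is its last holder (refcnt == 1). Simplified illustration only; the real function also re-checks the entry state under n->lock before unlinking:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <net/neighbour.h>

static int example_forced_gc(struct neigh_table *tbl)
{
	struct neighbour *n, *tmp;
	int shrunk = 0;

	spin_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		/* refcnt == 1: only the hash table still references n */
		if (refcount_read(&n->refcnt) == 1) {
			/* ... recheck state under n->lock, unlink, release ... */
			shrunk = 1;
		}
	}

	WRITE_ONCE(tbl->last_flush, jiffies);

	spin_unlock_bh(&tbl->lock);
	return shrunk;
}
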
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, false);
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
bool skip_perm)
{
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
if (likely(dev)) {
neigh_flush_dev(tbl, dev, skip_perm);
} else {
DEBUG_NET_WARN_ON_ONCE(skip_perm);
neigh_flush_table(tbl);
}
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
pneigh_ifdown(tbl, dev, skip_perm);
pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
hlist_add_head_rcu(&n->dev_list,
neigh_get_dev_table(dev, tbl->family));
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
neigh_dbg(2, "neigh %p is created\n", n);
rc = n;
out:
return rc;
out_tbl_unlock:
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
out_neigh_release:
if (!exempt_from_gc)
atomic_dec(&tbl->gc_entries);
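
The tbl->nht fetches in these hunks keep pairing rcu_dereference_protected() with lockdep_is_held(&tbl->lock), so lockdep continues to verify the write side once the lock is a spinlock, while readers stay on plain RCU. A small sketch of the two access paths; the example_* helpers are invented:

#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <net/neighbour.h>

/* Update side: caller holds tbl->lock (the spinlock), which is exactly what
 * the lockdep_is_held() condition now checks.
 */
static struct neigh_hash_table *example_nht_writer(struct neigh_table *tbl)
{
	return rcu_dereference_protected(tbl->nht,
					 lockdep_is_held(&tbl->lock));
}

/* Read side: unchanged by the conversion; caller holds rcu_read_lock(). */
static struct neigh_hash_table *example_nht_reader(struct neigh_table *tbl)
{
	return rcu_dereference(tbl->nht);
}
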
NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
* It's fine to release lock here, even if hash table
* grows while we are preempted.
*/
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
cond_resched();
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
}
*/
queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
}
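
The comment kept above ("It's fine to release lock here ...") is what allows the periodic scan to drop the lock between buckets: after cond_resched() the hash table may have been resized, so tbl->nht must be re-fetched under the reacquired spinlock. A stripped-down sketch of that drop-and-retake pattern (per-entry aging omitted; example_scan_all_buckets() is an invented name):

#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <net/neighbour.h>

static void example_scan_all_buckets(struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	unsigned int i;

	spin_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		/* ... age out entries in bucket i here ... */

		/* Yield between buckets; re-read nht in case it was resized. */
		spin_unlock_bh(&tbl->lock);
		cond_resched();
		spin_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}

	spin_unlock_bh(&tbl->lock);
}
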
static __inline__ int neigh_max_probes(struct neighbour *n)
managed_work.work);
struct neighbour *neigh;
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
list_for_each_entry(neigh, &tbl->managed_list, managed_list)
neigh_event_send_probe(neigh, NULL, false);
queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
}
static void neigh_proxy_process(struct timer_list *t)
return NULL;
}
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
list_add_rcu(&p->list, &tbl->parms.list);
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
neigh_parms_data_state_cleanall(p);
}
{
if (!parms || parms == &tbl->parms)
return;
- write_lock_bh(&tbl->lock);
+
+ spin_lock_bh(&tbl->lock);
list_del_rcu(&parms->list);
parms->dead = 1;
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
+
netdev_put(parms->dev, &parms->dev_tracker);
call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
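
The release path above is the usual RCU retirement sequence: unpublish with list_del_rcu() under the (now spinlock) write side, mark the object dead, drop the lock, and defer the free with call_rcu() so unlocked readers can finish. A self-contained sketch with a stand-in structure; example_parms and example_free_rcu are invented, not kernel symbols:

#include <linux/container_of.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_parms {
	struct list_head	list;
	int			dead;
	struct rcu_head		rcu_head;
};

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_parms, rcu_head));
}

static void example_parms_release(spinlock_t *lock, struct example_parms *p)
{
	spin_lock_bh(lock);
	list_del_rcu(&p->list);		/* unpublish; RCU readers may still see it */
	p->dead = 1;
	spin_unlock_bh(lock);

	call_rcu(&p->rcu_head, example_free_rcu);	/* free after a grace period */
}
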
else
WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
- rwlock_init(&tbl->lock);
+ spin_lock_init(&tbl->lock);
mutex_init(&tbl->phash_lock);
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
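
Not visible in these hunks, but implied by the switch to spin_lock_init(): the lock member of struct neigh_table (declared in include/net/neighbour.h) has to change type as well. A minimal stand-in to show the pairing; example_neigh_table is invented and omits every other member of the real structure:

#include <linux/spinlock.h>

struct example_neigh_table {
	/* was:	rwlock_t	lock; */
	spinlock_t		lock;	/* protects hash buckets and the gc/managed lists */
};

static void example_table_init(struct example_neigh_table *tbl)
{
	spin_lock_init(&tbl->lock);
}
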
err = __neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
NETLINK_CB(skb).portid, extack);
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
neigh_release(neigh);
neigh_remove_one(neigh);
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
out:
return err;
* We acquire tbl->lock to be nice to the periodic timers and
* make sure they always see a consistent set of values.
*/
- write_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
if (tb[NDTA_PARMS]) {
struct nlattr *tbp[NDTPA_MAX+1];
err = 0;
errout_tbl_lock:
- write_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
rcu_read_unlock();
errout:
return err;
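
The comment above explains the lock here: parameter updates are applied as a group under tbl->lock, so the periodic and delayed work, which read NEIGH_VAR() values under the same lock, never observe a half-applied set. A sketch of one such update; example_set_base_reachable() is an invented helper that takes milliseconds directly instead of parsing the NDTPA_* netlink attributes the real neightbl_set() handles:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <net/neighbour.h>

static void example_set_base_reachable(struct neigh_table *tbl,
				       struct neigh_parms *p,
				       unsigned int msecs)
{
	spin_lock_bh(&tbl->lock);

	NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, msecs_to_jiffies(msecs));
	/* Recompute immediately; otherwise the change only takes effect the
	 * next time the periodic work refreshes reachable_time.
	 */
	p->reachable_time =
		neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));

	spin_unlock_bh(&tbl->lock);
}
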
rcu_read_lock();
nht = rcu_dereference(tbl->nht);
- read_lock_bh(&tbl->lock); /* avoid resizes */
+ spin_lock_bh(&tbl->lock); /* avoid resizes */
for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
struct neighbour *n;
neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
cb(n, cookie);
}
- read_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
rcu_read_unlock();
}
EXPORT_SYMBOL(neigh_for_each);
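
A usage note for the walker above: callbacks now run with the table spinlock held (BH disabled) and under rcu_read_lock(), so concurrent walkers serialize where the old read_lock_bh() let them overlap, and a callback must not sleep or call back into the neighbour table (see the locking comment at the top). A small usage sketch; example_count_for_dev() and its callback are invented:

#include <linux/netdevice.h>
#include <net/neighbour.h>

struct example_count_ctx {
	const struct net_device	*dev;
	unsigned int		count;
};

static void example_count_cb(struct neighbour *n, void *cookie)
{
	struct example_count_ctx *ctx = cookie;

	if (n->dev == ctx->dev)
		ctx->count++;
}

static unsigned int example_count_for_dev(struct neigh_table *tbl,
					  const struct net_device *dev)
{
	struct example_count_ctx ctx = { .dev = dev, .count = 0 };

	neigh_for_each(tbl, example_count_cb, &ctx);
	return ctx.count;
}
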
rcu_read_lock();
state->nht = rcu_dereference(tbl->nht);
- read_lock_bh(&tbl->lock);
+ spin_lock_bh(&tbl->lock);
return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
- read_unlock_bh(&tbl->lock);
+ spin_unlock_bh(&tbl->lock);
rcu_read_unlock();
}
EXPORT_SYMBOL(neigh_seq_stop);