neighbour: Convert rwlock of struct neigh_table to spinlock.
author    Kuniyuki Iwashima <kuniyu@google.com>
          Wed, 22 Oct 2025 05:39:49 +0000 (05:39 +0000)
committer Jakub Kicinski <kuba@kernel.org>
          Sat, 25 Oct 2025 00:57:20 +0000 (17:57 -0700)
Only neigh_for_each() and neigh_seq_start/stop() are on the
reader side of neigh_table.lock.

Let's convert the rwlock to a plain spinlock.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251022054004.2514876-6-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/neighbour.h
net/atm/clip.c
net/core/neighbour.c
net/ipv4/arp.c
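
The conversion itself is mechanical: each write_lock_bh()/write_unlock_bh() pair on tbl->lock becomes spin_lock_bh()/spin_unlock_bh(), the two read_lock_bh() users take the same spinlock, and rwlock_init() becomes spin_lock_init(). A minimal sketch of the writer-side pattern, using a stand-in struct example_table and flush_example_table() rather than the real kernel symbols:

#include <linux/spinlock.h>

struct example_table {
	spinlock_t lock;	/* was: rwlock_t lock */
};

static struct example_table example_table = {
	.lock = __SPIN_LOCK_UNLOCKED(example_table.lock),
};

static void flush_example_table(struct example_table *tbl)
{
	spin_lock_bh(&tbl->lock);	/* was: write_lock_bh(&tbl->lock) */
	/* ... scan/update hash buckets under the table lock ... */
	spin_unlock_bh(&tbl->lock);	/* was: write_unlock_bh(&tbl->lock) */
}

Note that the rcu_dereference_protected(..., lockdep_is_held(&tbl->lock)) annotations in the diff below carry over unchanged, since lockdep_is_held() works for both lock types.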

diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 998ff9eccebb7c957b39e81ce16296d89526dbd2..2dfee6d4258af1b9ea2d0127de7364b82e43c9ba 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -238,7 +238,7 @@ struct neigh_table {
        atomic_t                gc_entries;
        struct list_head        gc_list;
        struct list_head        managed_list;
-       rwlock_t                lock;
+       spinlock_t              lock;
        unsigned long           last_rand;
        struct neigh_statistics __percpu *stats;
        struct neigh_hash_table __rcu *nht;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index f7a5565e794ef1a3f5451b27aa952efbc2b2c967..8f152e5fa6594102caaf4e9714d7e1dc1d5d40a5 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -168,10 +168,10 @@ static int neigh_check_cb(struct neighbour *n)
 
 static void idle_timer_check(struct timer_list *unused)
 {
-       write_lock(&arp_tbl.lock);
+       spin_lock(&arp_tbl.lock);
        __neigh_for_each_release(&arp_tbl, neigh_check_cb);
        mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
-       write_unlock(&arp_tbl.lock);
+       spin_unlock(&arp_tbl.lock);
 }
 
 static int clip_arp_rcv(struct sk_buff *skb)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6d2164b4d99980dc605f381abf2c3fa611f58578..96a3b1a93252a5b2cc9b73bd650f67545a313742 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -81,7 +81,7 @@ static struct hlist_head *neigh_get_dev_table(struct net_device *dev, int family
 }
 
 /*
-   Neighbour hash table buckets are protected with rwlock tbl->lock.
+   Neighbour hash table buckets are protected with tbl->lock.
 
    - All the scans/updates to hash buckets MUST be made under this lock.
    - NOTHING clever should be made under this lock: no callbacks
@@ -149,7 +149,7 @@ static void neigh_update_gc_list(struct neighbour *n)
 {
        bool on_gc_list, exempt_from_gc;
 
-       write_lock_bh(&n->tbl->lock);
+       spin_lock_bh(&n->tbl->lock);
        write_lock(&n->lock);
        if (n->dead)
                goto out;
@@ -172,14 +172,14 @@ static void neigh_update_gc_list(struct neighbour *n)
        }
 out:
        write_unlock(&n->lock);
-       write_unlock_bh(&n->tbl->lock);
+       spin_unlock_bh(&n->tbl->lock);
 }
 
 static void neigh_update_managed_list(struct neighbour *n)
 {
        bool on_managed_list, add_to_managed;
 
-       write_lock_bh(&n->tbl->lock);
+       spin_lock_bh(&n->tbl->lock);
        write_lock(&n->lock);
        if (n->dead)
                goto out;
@@ -193,7 +193,7 @@ static void neigh_update_managed_list(struct neighbour *n)
                list_add_tail(&n->managed_list, &n->tbl->managed_list);
 out:
        write_unlock(&n->lock);
-       write_unlock_bh(&n->tbl->lock);
+       spin_unlock_bh(&n->tbl->lock);
 }
 
 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
@@ -263,7 +263,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
 
        list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
                if (refcount_read(&n->refcnt) == 1) {
@@ -292,7 +292,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
        WRITE_ONCE(tbl->last_flush, jiffies);
 unlock:
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 
        return shrunk;
 }
@@ -454,23 +454,23 @@ static void neigh_flush_table(struct neigh_table *tbl)
 
 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
 {
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, false);
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 }
 EXPORT_SYMBOL(neigh_changeaddr);
 
 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
                          bool skip_perm)
 {
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
        if (likely(dev)) {
                neigh_flush_dev(tbl, dev, skip_perm);
        } else {
                DEBUG_NET_WARN_ON_ONCE(skip_perm);
                neigh_flush_table(tbl);
        }
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 
        pneigh_ifdown(tbl, dev, skip_perm);
        pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
@@ -687,7 +687,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
 
        n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
 
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
 
@@ -722,13 +722,13 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
        hlist_add_head_rcu(&n->dev_list,
                           neigh_get_dev_table(dev, tbl->family));
 
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
        neigh_dbg(2, "neigh %p is created\n", n);
        rc = n;
 out:
        return rc;
 out_tbl_unlock:
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 out_neigh_release:
        if (!exempt_from_gc)
                atomic_dec(&tbl->gc_entries);
@@ -982,7 +982,7 @@ static void neigh_periodic_work(struct work_struct *work)
 
        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
 
@@ -1036,9 +1036,9 @@ static void neigh_periodic_work(struct work_struct *work)
                 * It's fine to release lock here, even if hash table
                 * grows while we are preempted.
                 */
-               write_unlock_bh(&tbl->lock);
+               spin_unlock_bh(&tbl->lock);
                cond_resched();
-               write_lock_bh(&tbl->lock);
+               spin_lock_bh(&tbl->lock);
                nht = rcu_dereference_protected(tbl->nht,
                                                lockdep_is_held(&tbl->lock));
        }
@@ -1049,7 +1049,7 @@ out:
         */
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                              NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 }
 
 static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -1641,12 +1641,12 @@ static void neigh_managed_work(struct work_struct *work)
                                               managed_work.work);
        struct neighbour *neigh;
 
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
        list_for_each_entry(neigh, &tbl->managed_list, managed_list)
                neigh_event_send_probe(neigh, NULL, false);
        queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
                           NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 }
 
 static void neigh_proxy_process(struct timer_list *t)
@@ -1761,9 +1761,9 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                        return NULL;
                }
 
-               write_lock_bh(&tbl->lock);
+               spin_lock_bh(&tbl->lock);
                list_add_rcu(&p->list, &tbl->parms.list);
-               write_unlock_bh(&tbl->lock);
+               spin_unlock_bh(&tbl->lock);
 
                neigh_parms_data_state_cleanall(p);
        }
@@ -1783,10 +1783,12 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
 {
        if (!parms || parms == &tbl->parms)
                return;
-       write_lock_bh(&tbl->lock);
+
+       spin_lock_bh(&tbl->lock);
        list_del_rcu(&parms->list);
        parms->dead = 1;
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
+
        netdev_put(parms->dev, &parms->dev_tracker);
        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
 }
@@ -1835,7 +1837,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
        else
                WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
 
-       rwlock_init(&tbl->lock);
+       spin_lock_init(&tbl->lock);
        mutex_init(&tbl->phash_lock);
 
        INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
@@ -1978,10 +1980,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = __neigh_update(neigh, NULL, NUD_FAILED,
                             NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
                             NETLINK_CB(skb).portid, extack);
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
        neigh_release(neigh);
        neigh_remove_one(neigh);
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
 
 out:
        return err;
@@ -2406,7 +2408,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
         * We acquire tbl->lock to be nice to the periodic timers and
         * make sure they always see a consistent set of values.
         */
-       write_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
 
        if (tb[NDTA_PARMS]) {
                struct nlattr *tbp[NDTPA_MAX+1];
@@ -2525,7 +2527,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = 0;
 
 errout_tbl_lock:
-       write_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
        rcu_read_unlock();
 errout:
        return err;
@@ -3125,14 +3127,14 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
        rcu_read_lock();
        nht = rcu_dereference(tbl->nht);
 
-       read_lock_bh(&tbl->lock); /* avoid resizes */
+       spin_lock_bh(&tbl->lock); /* avoid resizes */
        for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
                struct neighbour *n;
 
                neigh_for_each_in_bucket(n, &nht->hash_heads[chain])
                        cb(n, cookie);
        }
-       read_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL(neigh_for_each);
@@ -3402,7 +3404,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
        rcu_read_lock();
        state->nht = rcu_dereference(tbl->nht);
-       read_lock_bh(&tbl->lock);
+       spin_lock_bh(&tbl->lock);
 
        return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
@@ -3442,7 +3444,7 @@ void neigh_seq_stop(struct seq_file *seq, void *v)
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;
 
-       read_unlock_bh(&tbl->lock);
+       spin_unlock_bh(&tbl->lock);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 833f2cf97178ee6a50fb3c99d02ed5b17ab5a879..f3bfecf8a2341c5e6583bd711e8b59cd59bdf872 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1217,10 +1217,10 @@ int arp_invalidate(struct net_device *dev, __be32 ip, bool force)
                        err = neigh_update(neigh, NULL, NUD_FAILED,
                                           NEIGH_UPDATE_F_OVERRIDE|
                                           NEIGH_UPDATE_F_ADMIN, 0);
-               write_lock_bh(&tbl->lock);
+               spin_lock_bh(&tbl->lock);
                neigh_release(neigh);
                neigh_remove_one(neigh);
-               write_unlock_bh(&tbl->lock);
+               spin_unlock_bh(&tbl->lock);
        }
 
        return err;
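
For completeness on why no meaningful reader side survives: fast-path neighbour lookups do not take tbl->lock at all; they walk the hash table under RCU (note the rcu_dereference(tbl->nht) calls above), which is why demoting the lock only affects the two slow-path iterators named in the commit message. A rough sketch of that lockless pattern, with lookup_sketch() as a hypothetical stand-in for the real lookup helpers:

static struct neighbour *lookup_sketch(struct neigh_table *tbl, u32 h)
{
	struct neigh_hash_table *nht;
	struct neighbour *n;

	rcu_read_lock();
	nht = rcu_dereference(tbl->nht);
	h &= (1 << nht->hash_shift) - 1;

	neigh_for_each_in_bucket(n, &nht->hash_heads[h]) {
		/* ... compare the key and device; on a match, take a
		 * refcount and return without touching tbl->lock ... */
	}

	rcu_read_unlock();
	return NULL;	/* abbreviated: match handling elided */
}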