inet: Avoid ehash lookup race in inet_twsk_hashdance_schedule()
author     Xuanqiang Luo <luoxuanqiang@kylinos.cn>
           Wed, 15 Oct 2025 02:02:36 +0000 (10:02 +0800)
committer  Jakub Kicinski <kuba@kernel.org>
           Fri, 17 Oct 2025 23:08:43 +0000 (16:08 -0700)
Since ehash lookups are lockless, if another CPU is concurrently converting
sk to tw, fetching the newly inserted tw while tw->tw_refcnt == 0 causes a
lookup failure.

The call trace map is drawn as follows:
   CPU 0                                CPU 1
   -----                                -----
     inet_twsk_hashdance_schedule()
     spin_lock()
     inet_twsk_add_node_rcu(tw, ...)
                                        __inet_lookup_established()
                                        (find tw, failure due to tw_refcnt = 0)
     __sk_nulls_del_node_init_rcu(sk)
     refcount_set(&tw->tw_refcnt, 3)
     spin_unlock()
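
For context on the failure mode: the lockless lookup gives up as soon as it
cannot take a reference on the entry it has matched. A simplified sketch of
the ehash walk in __inet_lookup_established() (helper names as in recent
kernels; the nulls-restart and re-validation steps are elided):

   sk_nulls_for_each_rcu(sk, node, &head->chain) {
           if (sk->sk_hash != hash)
                   continue;
           if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
                   /* A tw sitting in the chain with tw_refcnt still 0
                    * fails here, so the lookup returns NULL even though
                    * the original sk is still further down the chain.
                    */
                   if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
                           goto out;
                   goto found;
           }
   }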

By replacing sk with tw atomically via hlist_nulls_replace_init_rcu() after
setting tw_refcnt, we ensure that tw is either fully initialized or not
visible to other CPUs, eliminating the race.
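
hlist_nulls_replace_init_rcu() is added earlier in this series and its exact
implementation is not part of this patch. As a rough, hypothetical sketch
modeled on the existing hlist_replace_rcu() (the real helper may differ in
detail), it could look something like this:

   static inline void
   hlist_nulls_replace_init_rcu(struct hlist_nulls_node *old,
                                struct hlist_nulls_node *new)
   {
           struct hlist_nulls_node *next = old->next;

           /* Link the new node where the old one sits, then publish it to
            * RCU readers with a single pointer update.
            */
           new->next = next;
           WRITE_ONCE(new->pprev, old->pprev);
           rcu_assign_pointer(*(struct hlist_nulls_node __rcu **)new->pprev, new);
           if (!is_a_nulls(next))
                   WRITE_ONCE(next->pprev, &new->next);
           /* "init" part: the old node reads as unhashed afterwards. */
           WRITE_ONCE(old->pprev, NULL);
   }

Readers walking the chain therefore see either the old sk or the fully
initialized tw, never a window in which neither is present.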

It's worth noting that we hold lock_sock() before the replacement, so
there's no need to check whether sk is hashed. Thanks to Kuniyuki Iwashima!

Fixes: 3ab5aee7fe84 ("net: Convert TCP & DCCP hash tables to use RCU / hlist_nulls")
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Jiayuan Chen <jiayuan.chen@linux.dev>
Signed-off-by: Xuanqiang Luo <luoxuanqiang@kylinos.cn>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251015020236.431822-4-xuanqiang.luo@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c96d61d08854f082a0758e18c85db1a7d0c61005..d4c781a0667fe500b063f0739162b4d1cc6dbcbb 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -88,12 +88,6 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-                                  struct hlist_nulls_head *list)
-{
-       hlist_nulls_add_head_rcu(&tw->tw_node, list);
-}
-
 static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
 {
        __inet_twsk_schedule(tw, timeo, false);
@@ -113,13 +107,12 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
 {
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead, *bhead2;
 
-       /* Step 1: Put TW into bind hash. Original socket stays there too.
-          Note, that any socket with inet->num != 0 MUST be bound in
-          binding cache, even if it is closed.
+       /* Put TW into bind hash. Original socket stays there too.
+        * Note, that any socket with inet->num != 0 MUST be bound in
+        * binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
@@ -141,19 +134,6 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
 
        spin_lock(lock);
 
-       /* Step 2: Hash TW into tcp ehash chain */
-       inet_twsk_add_node_rcu(tw, &ehead->chain);
-
-       /* Step 3: Remove SK from hash chain */
-       if (__sk_nulls_del_node_init_rcu(sk))
-               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-
-
-       /* Ensure above writes are committed into memory before updating the
-        * refcount.
-        * Provides ordering vs later refcount_inc().
-        */
-       smp_wmb();
        /* tw_refcnt is set to 3 because we have :
         * - one reference for bhash chain.
         * - one reference for ehash chain.
@@ -163,6 +143,15 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
         */
        refcount_set(&tw->tw_refcnt, 3);
 
+       /* Ensure tw_refcnt has been set before tw is published.
+        * smp_wmb() provides the necessary memory barrier to enforce this
+        * ordering.
+        */
+       smp_wmb();
+
+       hlist_nulls_replace_init_rcu(&sk->sk_nulls_node, &tw->tw_node);
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+
        inet_twsk_schedule(tw, timeo);
 
        spin_unlock(lock);
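
Pieced together from the hunks above, the ehash part of
inet_twsk_hashdance_schedule() now reads roughly as follows (the middle of
the tw_refcnt comment, which lies outside the hunks, is elided):

        spin_lock(lock);

        /* tw_refcnt is set to 3 because we have :
         * - one reference for bhash chain.
         * - one reference for ehash chain.
         * ...
         */
        refcount_set(&tw->tw_refcnt, 3);

        /* Ensure tw_refcnt has been set before tw is published.
         * smp_wmb() provides the necessary memory barrier to enforce this
         * ordering.
         */
        smp_wmb();

        hlist_nulls_replace_init_rcu(&sk->sk_nulls_node, &tw->tw_node);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        inet_twsk_schedule(tw, timeo);

        spin_unlock(lock);

The old add-then-delete pair collapses into a single replace that only
becomes visible after tw_refcnt is set, so a concurrent lookup sees either
the fully set-up tw or the original sk.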