mptcp: drop bogus optimization in __mptcp_check_push()
author	Paolo Abeni <pabeni@redhat.com>
	Tue, 28 Oct 2025 08:16:52 +0000 (09:16 +0100)
committer	Jakub Kicinski <kuba@kernel.org>
	Thu, 30 Oct 2025 00:44:28 +0000 (17:44 -0700)
Accessing the transmit queue without owning the msk socket lock is
inherently racy, hence __mptcp_check_push() could actually quit early
even when there is pending data.

That in turn could cause an unexpected tx lockup and timeout.

Dropping the early check avoids the race, implicitly relying on the
later tests performed under the relevant lock. With this change, all
the other mptcp_send_head() call sites are now under the msk socket
lock and we can additionally drop the now-unneeded annotations on the
transmit head pointer accesses.
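
The race is the classic unlocked-empty-check pattern. A minimal
user-space sketch of the same bug shape (all names here are
illustrative, not kernel code; a pthread mutex stands in for the msk
socket lock and 'queue_head' for msk->first_pending):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *queue_head;        /* protected by 'lock' */

    static void check_push(void)
    {
            /* Buggy: tests queue_head without holding 'lock'. A writer
             * can queue data right after this test sees "empty", and
             * the push is then lost until a timeout fires.
             */
            if (!queue_head)
                    return;

            pthread_mutex_lock(&lock);
            /* ... push pending data ... */
            pthread_mutex_unlock(&lock);
    }

    static void queue_data(void *frag)
    {
            pthread_mutex_lock(&lock);
            queue_head = frag;      /* races with the unlocked test above */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            int frag;

            queue_data(&frag);
            check_push();
            return 0;
    }

If the store in queue_data() lands just after check_push()'s unlocked
test returns "empty", nothing pushes the data until a timer expires,
matching the timeout described above.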

Fixes: 6e628cd3a8f7 ("mptcp: use mptcp release_cb for delayed tasks")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Geliang Tang <geliang@kernel.org>
Tested-by: Geliang Tang <geliang@kernel.org>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20251028-net-mptcp-send-timeout-v1-1-38ffff5a9ec8@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/mptcp/protocol.c
net/mptcp/protocol.h

index 875027b9319cf6181d480677fc9d21ab6b771c2f..655a2a45224ff03512ab389a8533ec1a4717abdf 100644
@@ -1007,7 +1007,7 @@ static void __mptcp_clean_una(struct sock *sk)
                        if (WARN_ON_ONCE(!msk->recovery))
                                break;
 
-                       WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+                       msk->first_pending = mptcp_send_next(sk);
                }
 
                dfrag_clear(sk, dfrag);
@@ -1552,7 +1552,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
 
                        mptcp_update_post_push(msk, dfrag, ret);
                }
-               WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+               msk->first_pending = mptcp_send_next(sk);
 
                if (msk->snd_burst <= 0 ||
                    !sk_stream_memory_free(ssk) ||
@@ -1912,7 +1912,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        get_page(dfrag->page);
                        list_add_tail(&dfrag->list, &msk->rtx_queue);
                        if (!msk->first_pending)
-                               WRITE_ONCE(msk->first_pending, dfrag);
+                               msk->first_pending = dfrag;
                }
                pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
                         dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
@@ -2882,7 +2882,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_data_frag *dtmp, *dfrag;
 
-       WRITE_ONCE(msk->first_pending, NULL);
+       msk->first_pending = NULL;
        list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
                dfrag_clear(sk, dfrag);
 }
@@ -3422,9 +3422,6 @@ void __mptcp_data_acked(struct sock *sk)
 
 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 {
-       if (!mptcp_send_head(sk))
-               return;
-
        if (!sock_owned_by_user(sk))
                __mptcp_subflow_push_pending(sk, ssk, false);
        else
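
With the early return gone, only the dispatch under proper locking
remains. A sketch of the resulting function; the else branch lies
outside the hunk context above, so the flag manipulation shown is
reconstructed from the surrounding kernel source and is an assumption,
not part of this diff:

    void __mptcp_check_push(struct sock *sk, struct sock *ssk)
    {
            if (!sock_owned_by_user(sk))
                    /* no process context owns the lock: push now */
                    __mptcp_subflow_push_pending(sk, ssk, false);
            else
                    /* assumed from the surrounding tree: defer the push
                     * to the release_cb path run by the lock owner
                     */
                    __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
    }

Either way, mptcp_send_head() is next evaluated with the msk socket
lock held, so an empty transmit queue is detected there without racing.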
index 52f9cfa4ce95c789a7b9c53c47095abe7964d18f..379a88e14e8d2549e54a103da1ed758c4bae9bb9 100644
@@ -414,7 +414,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
 {
        const struct mptcp_sock *msk = mptcp_sk(sk);
 
-       return READ_ONCE(msk->first_pending);
+       return msk->first_pending;
 }
 
 static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
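
For context on the dropped annotations: READ_ONCE()/WRITE_ONCE() force
the compiler to emit exactly one, non-torn access, which is only needed
while some reader or writer runs outside the lock. A rough user-space
analogue (READ_ONCE_ISH()/WRITE_ONCE_ISH() are made-up stand-ins for
the kernel macros, which additionally perform type checking):

    #include <stdio.h>

    /* Simplified analogues of the kernel macros: a volatile access
     * keeps the compiler from tearing, fusing or re-reading it.
     */
    #define READ_ONCE_ISH(x)   (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE_ISH(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct frag { int len; };

    static struct frag *first_pending; /* stand-in for msk->first_pending */

    int main(void)
    {
            static struct frag f = { .len = 1 };

            /* Before this patch: a lockless reader existed, so every
             * access to the field had to be annotated.
             */
            WRITE_ONCE_ISH(first_pending, &f);
            struct frag *head = READ_ONCE_ISH(first_pending);

            /* After this patch: all accesses happen under the msk
             * socket lock, so a plain store suffices.
             */
            first_pending = NULL;

            printf("%d\n", head ? head->len : 0);
            return 0;
    }

Once every reader and writer of the field runs under the same lock, as
is the case for msk->first_pending after this patch, plain accesses on
both sides are already race-free and the annotations add nothing.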