mptcp: use mptcp_schedule_work instead of open-coding it
author    Paolo Abeni <pabeni@redhat.com>
          Tue, 11 Apr 2023 20:42:09 +0000 (22:42 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 20 Apr 2023 10:36:59 +0000 (12:36 +0200)
commit a5cb752b125766524c921faab1a45cc96065b0a7 upstream.

Beyond reducing code duplication, this also avoids scheduling
the mptcp_worker on a closed socket in some edge scenarios.

The addressed issue is actually older than the blamed commit
below, but this fix needs it as a prerequisite.
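
For reference, a sketch of the helper being adopted here, as introduced
by the blamed commit below (shown for context only; the exact upstream
body may differ in detail):

	/* Sketch of mptcp_schedule_work() from ba8f48f7a4d7
	 * ("mptcp: introduce mptcp_schedule_work").
	 */
	bool mptcp_schedule_work(struct sock *sk)
	{
		if (inet_sk_state_load(sk) != TCP_CLOSE &&
		    schedule_work(&mptcp_sk(sk)->work)) {
			/* The reference is taken only once the work is
			 * actually queued, so callers no longer need to
			 * pair schedule_work() with sock_hold().
			 */
			sock_hold(sk);
			return true;
		}
		return false;
	}

Because the helper bails out once the msk has reached TCP_CLOSE,
routing every call site through it is what closes the "worker
scheduled on a closed socket" window mentioned above.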

Fixes: ba8f48f7a4d7 ("mptcp: introduce mptcp_schedule_work")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
net/mptcp/options.c
net/mptcp/subflow.c

diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 5ded85e2c374a1da197aed40641f863d34713bef..3872eadb076bc20182764b10a68b62b6ab284e8d 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
         */
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
                if (mp_opt.data_fin && mp_opt.data_len == 1 &&
-                   mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
-                   schedule_work(&msk->work))
-                       sock_hold(subflow->conn);
+                   mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+                       mptcp_schedule_work((struct sock *)msk);
 
                return true;
        }
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 8f6e48e5db2cee9ce97bef71d44c3f870a038f55..dbc02c2c57ccc95228991c7ba765603ed16f1a77 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -407,9 +407,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
-       if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-           schedule_work(&mptcp_sk(sk)->work))
-               return; /* worker will put sk for us */
+       if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+               mptcp_schedule_work(sk);
 
        sock_put(sk);
 }
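
Note that the early "worker will put sk for us" return is gone:
mptcp_schedule_work() takes its own reference when it queues the
worker, so the caller's sock_put(sk) is now always executed and the
reference counting stays balanced.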
@@ -1117,8 +1116,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
-                               if (updated && schedule_work(&msk->work))
-                                       sock_hold((struct sock *)msk);
+                               if (updated)
+                                       mptcp_schedule_work((struct sock *)msk);
 
                                return MAPPING_DATA_FIN;
                        }
@@ -1221,17 +1220,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-       struct sock *sk = (struct sock *)msk;
-
        if (likely(ssk->sk_state != TCP_CLOSE))
                return;
 
        if (skb_queue_empty(&ssk->sk_receive_queue) &&
-           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-               sock_hold(sk);
-               if (!schedule_work(&msk->work))
-                       sock_put(sk);
-       }
+           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+               mptcp_schedule_work((struct sock *)msk);
 }
 
 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)