mptcp: reset the packet scheduler on incoming MP_PRIO
author     Paolo Abeni <pabeni@redhat.com>
           Fri, 8 Apr 2022 19:45:55 +0000 (12:45 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 9 Jun 2022 08:29:57 +0000 (10:29 +0200)
[ Upstream commit 43f5b111d1ff16161ce60e19aeddb999cb6f0b01 ]

When an incoming MP_PRIO option changes the backup
status of any subflow, we need to reset the packet
scheduler status, or the next send could keep using
the previously selected subflow, without taking into
account the new priorities.

Reported-by: Davide Caratti <dcaratti@redhat.com>
Fixes: 40453a5c61f4 ("mptcp: add the incoming MP_PRIO support")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
net/mptcp/pm.c
net/mptcp/protocol.c
net/mptcp/protocol.h
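
The fix hinges on msk->last_snd, the packet scheduler's cached choice of
transmit subflow. The following stand-alone, user-space C sketch is
hypothetical code, not the kernel's scheduler (struct conn,
select_subflow() and friends are invented names); it models how a cached
pick goes stale when MP_PRIO flips the backup flags, and how clearing
the cache forces a fresh selection.

/*
 * Hypothetical user-space sketch -- NOT kernel code. It models why a
 * stale scheduler cache keeps transmitting on the old subflow after an
 * incoming MP_PRIO flips the backup flags, and how clearing the cache
 * (the msk->last_snd = NULL in the patch) forces a fresh selection.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct subflow {
	const char *name;
	bool backup;			/* flipped by an incoming MP_PRIO */
};

struct conn {
	struct subflow *flows;
	size_t nr_flows;
	struct subflow *last_snd;	/* scheduler cache, like msk->last_snd */
};

/* Prefer a non-backup subflow, but short-circuit on the cached choice. */
static struct subflow *select_subflow(struct conn *c)
{
	if (c->last_snd)
		return c->last_snd;
	for (size_t i = 0; i < c->nr_flows; i++)
		if (!c->flows[i].backup)
			return c->last_snd = &c->flows[i];
	return c->nr_flows ? (c->last_snd = &c->flows[0]) : NULL;
}

int main(void)
{
	struct subflow flows[] = { { "A", false }, { "B", true } };
	struct conn c = { flows, 2, NULL };

	printf("send via %s\n", select_subflow(&c)->name);	/* A */

	flows[0].backup = true;		/* MP_PRIO: A is now a backup path */
	flows[1].backup = false;	/* ... and B is the preferred one */

	/* Without the fix, the stale cache still picks A: */
	printf("stale cache: %s\n", select_subflow(&c)->name);

	c.last_snd = NULL;		/* the fix: reset the scheduler */
	printf("after reset: %s\n", select_subflow(&c)->name);	/* B */
	return 0;
}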

diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index aa51b100e03353d0dc2b8f170662bfbe9ad51370..4d6a61acc48708effe4474d44b822885e62ddc34 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -261,14 +261,25 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
        spin_unlock_bh(&pm->lock);
 }
 
-void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
+void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
 {
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       struct sock *sk = subflow->conn;
+       struct mptcp_sock *msk;
 
        pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
-       subflow->backup = bkup;
+       msk = mptcp_sk(sk);
+       if (subflow->backup != bkup) {
+               subflow->backup = bkup;
+               mptcp_data_lock(sk);
+               if (!sock_owned_by_user(sk))
+                       msk->last_snd = NULL;
+               else
+               __set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
+               mptcp_data_unlock(sk);
+       }
 
-       mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
+       mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
 }
 
 void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 2a9335ce5df11c748c376eec85f9515fd3f0537f..8f54293c1d88759cfb6ce997f4b87b7ebff9c70b 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3102,6 +3102,8 @@ static void mptcp_release_cb(struct sock *sk)
                        __mptcp_set_connected(sk);
                if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
                        __mptcp_error_report(sk);
+               if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags))
+                       msk->last_snd = NULL;
        }
 
        __mptcp_update_rmem(sk);
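
Taken together, the pm.c and protocol.c hunks follow the usual
deferred-work pattern for a socket that may be locked by a user
context: if sock_owned_by_user() is false, the scheduler cache is
cleared immediately; otherwise a flag is set under the data lock and
mptcp_release_cb() performs the reset once the lock owner releases the
socket. The stand-alone C sketch below models that pattern; struct
fake_sock and its fields are invented stand-ins, not kernel code.

/*
 * Hypothetical sketch -- NOT kernel code. struct fake_sock and the flag
 * value are invented; they stand in for the msk, sock_owned_by_user()
 * and MPTCP_RESET_SCHEDULER used by the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define RESET_SCHEDULER	(1UL << 7)	/* stand-in for MPTCP_RESET_SCHEDULER */

struct fake_sock {
	bool owned_by_user;	/* stand-in for sock_owned_by_user() */
	unsigned long cb_flags;
	void *last_snd;		/* scheduler cache */
};

/* Called on an incoming MP_PRIO that changed a backup flag. */
static void request_scheduler_reset(struct fake_sock *sk)
{
	if (!sk->owned_by_user)
		sk->last_snd = NULL;		 /* safe to reset right away */
	else
		sk->cb_flags |= RESET_SCHEDULER; /* defer to the lock owner */
}

/* Runs when the owner releases the socket, like mptcp_release_cb(). */
static void release_cb(struct fake_sock *sk)
{
	if (sk->cb_flags & RESET_SCHEDULER) {
		sk->cb_flags &= ~RESET_SCHEDULER;
		sk->last_snd = NULL;
	}
}

int main(void)
{
	struct fake_sock sk = { .owned_by_user = true, .last_snd = (void *)1 };

	request_scheduler_reset(&sk);	/* deferred: flag set, cache kept */
	printf("deferred: last_snd=%p\n", sk.last_snd);

	sk.owned_by_user = false;
	release_cb(&sk);		/* pending work runs at release time */
	printf("released: last_snd=%p\n", sk.last_snd);
	return 0;
}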
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 5655a63aa6a8b216e77fca208e99fb42b33ad666..9ac63fa4866effbfd5f677dfcb6ead5e1b411d09 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
 #define MPTCP_RETRANSMIT       4
 #define MPTCP_FLUSH_JOIN_LIST  5
 #define MPTCP_CONNECTED                6
+#define MPTCP_RESET_SCHEDULER  7
 
 static inline bool before64(__u64 seq1, __u64 seq2)
 {