mptcp: sched: check both directions for backup
Author:     Matthieu Baerts (NGI0) <matttbe@kernel.org>
AuthorDate: Fri, 9 Aug 2024 09:05:31 +0000 (11:05 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Mon, 19 Aug 2024 03:45:47 +0000 (05:45 +0200)
commit b6a66e521a2032f7fcba2af5a9bcbaeaa19b7ca3 upstream.

The 'mptcp_subflow_context' structure has two items related to the
backup flags:

 - 'backup': the subflow has been marked as backup by the other peer

 - 'request_bkup': the backup flag has been set by the host
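
For reference, an abridged sketch of where these bits live; the field
names match 'struct mptcp_subflow_context' in net/mptcp/protocol.h,
and all other members are elided:

    struct mptcp_subflow_context {
            /* ... */
            u32     backup : 1,             /* marked as backup by the peer */
                    request_bkup : 1;       /* backup requested by this host */
            /* ... */
    };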

Before this patch, the scheduler was only looking at the 'backup'
flag. That can make sense in some cases, but it is not what is wanted
in general: either the path-manager was setting both flags when
sending an MP_PRIO, or the receiver was duplicating the 'backup' flag
in the subflow request.
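
A minimal sketch of the combined check this patch introduces (the
helper name is hypothetical; the patch open-codes the expression, as
the hunks below show):

    /* A subflow is effectively backup if either side flagged it so. */
    static bool subflow_is_backup(const struct mptcp_subflow_context *subflow)
    {
            return subflow->backup || subflow->request_bkup;
    }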

Note that the use of these two flags in the path-manager is going to
be fixed in the following commits, but this change is needed here to
avoid modifying the behaviour.

Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests")
Cc: stable@vger.kernel.org
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
[ Conflicts in protocol.c, because the context has changed in commit
  3ce0852c86b9 ("mptcp: enforce HoL-blocking estimation"), which is not
  in this version. That commit is unrelated to the modification being
  backported here. ]
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/trace/events/mptcp.h
net/mptcp/protocol.c

diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 6bf43176f14c14654fd1d656b5805a47f40251da..df26c1dd3d8dc8c06e035a791a1d910c1ecb2c7f 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
                struct sock *ssk;
 
                __entry->active = mptcp_subflow_active(subflow);
-               __entry->backup = subflow->backup;
+               __entry->backup = subflow->backup || subflow->request_bkup;
 
                if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
                        __entry->free = sk_stream_memory_free(subflow->tcp_sock);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 081c8d00472d4439c352852e2c97adb6f98d19bb..b6b708dbfce9c4c13775e28b103a0762cb453967 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1514,13 +1514,15 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
                send_info[i].ratio = -1;
        }
        mptcp_for_each_subflow(msk, subflow) {
+               bool backup = subflow->backup || subflow->request_bkup;
+
                trace_mptcp_subflow_get_send(subflow);
                ssk =  mptcp_subflow_tcp_sock(subflow);
                if (!mptcp_subflow_active(subflow))
                        continue;
 
                tout = max(tout, mptcp_timeout_from_subflow(subflow));
-               nr_active += !subflow->backup;
+               nr_active += !backup;
                if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
                        continue;
 
@@ -1530,9 +1532,9 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 
                ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
                                pace);
-               if (ratio < send_info[subflow->backup].ratio) {
-                       send_info[subflow->backup].ssk = ssk;
-                       send_info[subflow->backup].ratio = ratio;
+               if (ratio < send_info[backup].ratio) {
+                       send_info[backup].ssk = ssk;
+                       send_info[backup].ratio = ratio;
                }
        }
        __mptcp_set_timeout(sk, tout);
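
For context, 'send_info[]' in mptcp_subflow_get_send() is indexed by
the backup status: entry 0 tracks the best non-backup subflow and
entry 1 the best backup one, so the boolean promotes directly to an
array index. A standalone sketch of that selection step, with
simplified types (the kernel uses div_u64() and 'struct sock'
pointers):

    #include <stdbool.h>
    #include <stdint.h>

    struct send_info {
            void            *ssk;   /* best subflow socket in this class */
            uint64_t        ratio;  /* lower is better: queued bytes vs. pace */
    };

    /* Keep the subflow with the lowest wmem/pace ratio per class:
     * index 0 for non-backup subflows, index 1 for backup ones. */
    static void update_best(struct send_info info[2], void *ssk,
                            uint64_t wmem_queued, uint64_t pace, bool backup)
    {
            uint64_t ratio;

            if (!pace)
                    return;

            ratio = (wmem_queued << 32) / pace;
            if (ratio < info[backup].ratio) {
                    info[backup].ssk = ssk;
                    info[backup].ratio = ratio;
            }
    }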