git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 8 Sep 2024 13:06:39 +0000 (15:06 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 8 Sep 2024 13:06:39 +0000 (15:06 +0200)
added patches:
mptcp-avoid-duplicated-sub_closed-events.patch
mptcp-close-subflow-when-receiving-tcp-fin.patch
mptcp-constify-a-bunch-of-of-helpers.patch
mptcp-pm-add_addr-0-is-not-a-new-address.patch
mptcp-pm-avoid-possible-uaf-when-selecting-endp.patch
mptcp-pm-check-add_addr_accept_max-before-accepting-new-add_addr.patch
mptcp-pm-do-not-remove-already-closed-subflows.patch
mptcp-pm-fullmesh-select-the-right-id-later.patch
mptcp-pm-only-decrement-add_addr_accepted-for-mpj-req.patch
mptcp-pm-re-using-id-of-unused-flushed-subflows.patch
mptcp-pm-send-ack-on-an-active-subflow.patch
mptcp-pm-skip-connecting-to-already-established-sf.patch
mptcp-pr_debug-add-missing-n-at-the-end.patch

14 files changed:
queue-5.15/mptcp-avoid-duplicated-sub_closed-events.patch [new file with mode: 0644]
queue-5.15/mptcp-close-subflow-when-receiving-tcp-fin.patch [new file with mode: 0644]
queue-5.15/mptcp-constify-a-bunch-of-of-helpers.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-add_addr-0-is-not-a-new-address.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-avoid-possible-uaf-when-selecting-endp.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-check-add_addr_accept_max-before-accepting-new-add_addr.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-do-not-remove-already-closed-subflows.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-fullmesh-select-the-right-id-later.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-only-decrement-add_addr_accepted-for-mpj-req.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-re-using-id-of-unused-flushed-subflows.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-send-ack-on-an-active-subflow.patch [new file with mode: 0644]
queue-5.15/mptcp-pm-skip-connecting-to-already-established-sf.patch [new file with mode: 0644]
queue-5.15/mptcp-pr_debug-add-missing-n-at-the-end.patch [new file with mode: 0644]
queue-5.15/series

diff --git a/queue-5.15/mptcp-avoid-duplicated-sub_closed-events.patch b/queue-5.15/mptcp-avoid-duplicated-sub_closed-events.patch
new file mode 100644 (file)
index 0000000..ca9b9cf
--- /dev/null
@@ -0,0 +1,89 @@
+From stable+bounces-73740-greg=kroah.com@vger.kernel.org Fri Sep  6 10:35:15 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:34:31 +0200
+Subject: mptcp: avoid duplicated SUB_CLOSED events
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: "MPTCP Upstream" <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, "Arınç ÜNAL" <arinc.unal@arinc9.com>, "Mat Martineau" <martineau@kernel.org>, "Paolo Abeni" <pabeni@redhat.com>
+Message-ID: <20240906083430.1772159-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit d82809b6c5f2676b382f77a5cbeb1a5d91ed2235 upstream.
+
+The initial subflow might have already been closed, but still in the
+connection list. When the worker is instructed to close the subflows
+that have been marked as closed, it might then try to close the initial
+subflow again.
+
+ A consequence of that is that the SUB_CLOSED event can be seen twice:
+
+  # ip mptcp endpoint
+  1.1.1.1 id 1 subflow dev eth0
+  2.2.2.2 id 2 subflow dev eth1
+
+  # ip mptcp monitor &
+  [         CREATED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+  [     ESTABLISHED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+  [  SF_ESTABLISHED] remid=0 locid=2 saddr4=2.2.2.2 daddr4=9.9.9.9
+
+  # ip mptcp endpoint delete id 1
+  [       SF_CLOSED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+  [       SF_CLOSED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+
+The first one is coming from mptcp_pm_nl_rm_subflow_received(), and the
+second one from __mptcp_close_subflow().
+
+To avoid doing the post-closed processing twice, the subflow is now
+marked as closed the first time.
+
+Note that it is not enough to check if we are dealing with the first
+subflow and check its sk_state: the subflow might have been reset or
+closed before calling mptcp_close_ssk().
+
+Fixes: b911c97c7dc7 ("mptcp: add netlink event support")
+Cc: stable@vger.kernel.org
+Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ Conflict in protocol.h due to commit f1f26512a9bf ("mptcp: use plain
+  bool instead of custom binary enum"), commit dfc8d0603033 ("mptcp:
+  implement delayed seq generation for passive fastopen") and more that
+  are not in this version, because they modify the context and the size
+  of __unused. The conflict is easy to resolve, by adding not only the
+  new field (close_event_done), but also __unused. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    6 ++++++
+ net/mptcp/protocol.h |    4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2337,6 +2337,12 @@ static void __mptcp_close_ssk(struct soc
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                    struct mptcp_subflow_context *subflow)
+ {
++      /* The first subflow can already be closed and still in the list */
++      if (subflow->close_event_done)
++              return;
++
++      subflow->close_event_done = true;
++
+       if (sk->sk_state == TCP_ESTABLISHED)
+               mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
+       __mptcp_close_ssk(sk, ssk, subflow);
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -441,7 +441,9 @@ struct mptcp_subflow_context {
+               can_ack : 1,        /* only after processing the remote a key */
+               disposable : 1,     /* ctx can be free at ulp release time */
+               stale : 1,          /* unable to snd/rcv data, do not use for xmit */
+-              valid_csum_seen : 1;        /* at least one csum validated */
++              valid_csum_seen : 1,        /* at least one csum validated */
++              close_event_done : 1,       /* has done the post-closed part */
++              __unused : 11;
+       enum mptcp_data_avail data_avail;
+       u32     remote_nonce;
+       u64     thmac;
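
The fix above boils down to making mptcp_close_ssk() idempotent: a one-bit flag is latched the first time the close path runs, so the SUB_CLOSED event and the rest of the post-close processing cannot happen a second time. A minimal userspace sketch of that pattern (toy names, not the kernel's struct mptcp_subflow_context):

  #include <stdbool.h>
  #include <stdio.h>

  /* Toy model: one bit remembers that the close event was already emitted. */
  struct toy_subflow {
          unsigned int close_event_done : 1;
  };

  static void toy_close_subflow(struct toy_subflow *sf)
  {
          /* Already handled once: do nothing, like the early return
           * added to mptcp_close_ssk() above. */
          if (sf->close_event_done)
                  return;
          sf->close_event_done = 1;
          printf("SUB_CLOSED emitted\n");
  }

  int main(void)
  {
          struct toy_subflow sf = { 0 };

          toy_close_subflow(&sf); /* emits the event */
          toy_close_subflow(&sf); /* second call is a no-op */
          return 0;
  }
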
diff --git a/queue-5.15/mptcp-close-subflow-when-receiving-tcp-fin.patch b/queue-5.15/mptcp-close-subflow-when-receiving-tcp-fin.patch
new file mode 100644 (file)
index 0000000..3313401
--- /dev/null
@@ -0,0 +1,90 @@
+From matttbe@kernel.org Fri Sep  6 10:35:29 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:35:21 +0200
+Subject: mptcp: close subflow when receiving TCP+FIN
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906083520.1773331-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit f09b0ad55a1196f5891663f8888463c0541059cb upstream.
+
+When a peer decides to close one subflow in the middle of a connection
+having multiple subflows, the receiver of the first FIN should accept
+that, and close the subflow on its side as well. If not, the subflow
+will stay half closed, and would even continue to be used until the end
+of the MPTCP connection or a reset from the network.
+
+The issue has not been seen before, probably because the in-kernel
+path-manager always sends a RM_ADDR before closing the subflow. Upon the
+reception of this RM_ADDR, the other peer will initiate the closure on
+its side as well. On the other hand, if the RM_ADDR is lost, or if the
+path-manager of the other peer only closes the subflow without sending a
+RM_ADDR, the subflow would switch to TCP_CLOSE_WAIT, but that's it,
+leaving the subflow half-closed.
+
+So now, when the subflow switches to the TCP_CLOSE_WAIT state, and if
+the MPTCP connection has not been closed before with a DATA_FIN, the
+kernel owning the subflow schedules its worker to initiate the closure
+on its side as well.
+
+This issue can be easily reproduced with packetdrill, as visible in [1],
+by creating an additional subflow, injecting a FIN+ACK before sending
+the DATA_FIN, and expecting a FIN+ACK in return.
+
+Fixes: 40947e13997a ("mptcp: schedule worker when subflow is closed")
+Cc: stable@vger.kernel.org
+Link: https://github.com/multipath-tcp/packetdrill/pull/154 [1]
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-1-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ No conflicts but 'sk' is not available in __mptcp_close_subflow in
+  this version. It would require b6985b9b8295 ("mptcp: use the workqueue
+  to destroy unaccepted sockets") which has not been backported to this
+  version. It is easier to get 'sk' by casting 'msk' into a 'struct
+  sock'. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    5 ++++-
+ net/mptcp/subflow.c  |    8 ++++++--
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2361,8 +2361,11 @@ static void __mptcp_close_subflow(struct
+       list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++              int ssk_state = inet_sk_state_load(ssk);
+-              if (inet_sk_state_load(ssk) != TCP_CLOSE)
++              if (ssk_state != TCP_CLOSE &&
++                  (ssk_state != TCP_CLOSE_WAIT ||
++                   inet_sk_state_load((struct sock *)msk) != TCP_ESTABLISHED))
+                       continue;
+               /* 'subflow_data_ready' will re-sched once rx queue is empty */
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1131,12 +1131,16 @@ out:
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+-      if (likely(ssk->sk_state != TCP_CLOSE))
++      struct sock *sk = (struct sock *)msk;
++
++      if (likely(ssk->sk_state != TCP_CLOSE &&
++                 (ssk->sk_state != TCP_CLOSE_WAIT ||
++                  inet_sk_state_load(sk) != TCP_ESTABLISHED)))
+               return;
+       if (skb_queue_empty(&ssk->sk_receive_queue) &&
+           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+-              mptcp_schedule_work((struct sock *)msk);
++              mptcp_schedule_work(sk);
+ }
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
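
Spelled out, the condition added above makes the worker run either when the subflow is fully closed, or when it sits in TCP_CLOSE_WAIT while the MPTCP-level connection is still established (no DATA_FIN handled yet). A standalone sketch of that predicate, using plain enums rather than the kernel's state definitions:

  #include <stdbool.h>
  #include <stdio.h>

  enum toy_state { ESTABLISHED, CLOSE_WAIT, CLOSED };

  /* Mirror of the check in subflow_sched_work_if_closed(): act when the
   * subflow is closed, or half-closed (CLOSE_WAIT) while the MPTCP
   * connection itself is still established. */
  static bool should_schedule_worker(enum toy_state subflow, enum toy_state msk)
  {
          return subflow == CLOSED ||
                 (subflow == CLOSE_WAIT && msk == ESTABLISHED);
  }

  int main(void)
  {
          /* peer closed one subflow mid-connection: close our side too */
          printf("%d\n", should_schedule_worker(CLOSE_WAIT, ESTABLISHED));
          /* DATA_FIN already exchanged: nothing extra to schedule */
          printf("%d\n", should_schedule_worker(CLOSE_WAIT, CLOSE_WAIT));
          /* healthy subflow: leave it alone */
          printf("%d\n", should_schedule_worker(ESTABLISHED, ESTABLISHED));
          return 0;
  }
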
diff --git a/queue-5.15/mptcp-constify-a-bunch-of-of-helpers.patch b/queue-5.15/mptcp-constify-a-bunch-of-of-helpers.patch
new file mode 100644 (file)
index 0000000..f7d5fd3
--- /dev/null
@@ -0,0 +1,234 @@
+From stable+bounces-73734-greg=kroah.com@vger.kernel.org Fri Sep  6 10:32:23 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:31:52 +0200
+Subject: mptcp: constify a bunch of of helpers
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, Paolo Abeni <pabeni@redhat.com>, Mat Martineau <mathew.j.martineau@linux.intel.com>, Jakub Kicinski <kuba@kernel.org>, Matthieu Baerts <matttbe@kernel.org>
+Message-ID: <20240906083151.1768557-3-matttbe@kernel.org>
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 90d930882139f166ed2551205d6f6d8c50b656fb upstream.
+
+A few pm-related helpers don't touch arguments which lack
+the const modifier, let's constify them.
+
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 48e50dcbcbaa ("mptcp: pm: avoid possible UaF when selecting endp")
+[ Conflicts because a few modified helpers from the original patch are
+  not present in this version. We don't need to do anything with them. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c         |    4 ++--
+ net/mptcp/pm_netlink.c |   36 ++++++++++++++++++------------------
+ net/mptcp/protocol.h   |   18 +++++++++---------
+ 3 files changed, 29 insertions(+), 29 deletions(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -202,7 +202,7 @@ void mptcp_pm_add_addr_received(struct m
+ }
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+-                            struct mptcp_addr_info *addr)
++                            const struct mptcp_addr_info *addr)
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+@@ -260,7 +260,7 @@ void mptcp_pm_mp_fail_received(struct so
+ /* path manager helpers */
+-bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
++bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
+                             unsigned int opt_size, unsigned int remaining,
+                             struct mptcp_addr_info *addr, bool *echo,
+                             bool *port, bool *drop_other_suboptions)
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -60,7 +60,7 @@ struct pm_nl_pernet {
+ #define ADD_ADDR_RETRANS_MAX  3
+ static bool addresses_equal(const struct mptcp_addr_info *a,
+-                          struct mptcp_addr_info *b, bool use_port)
++                          const struct mptcp_addr_info *b, bool use_port)
+ {
+       bool addr_equals = false;
+@@ -123,7 +123,7 @@ static void remote_address(const struct
+ }
+ static bool lookup_subflow_by_saddr(const struct list_head *list,
+-                                  struct mptcp_addr_info *saddr)
++                                  const struct mptcp_addr_info *saddr)
+ {
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_addr_info cur;
+@@ -141,7 +141,7 @@ static bool lookup_subflow_by_saddr(cons
+ }
+ static bool lookup_subflow_by_daddr(const struct list_head *list,
+-                                  struct mptcp_addr_info *daddr)
++                                  const struct mptcp_addr_info *daddr)
+ {
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_addr_info cur;
+@@ -162,8 +162,8 @@ static struct mptcp_pm_addr_entry *
+ select_local_address(const struct pm_nl_pernet *pernet,
+                    struct mptcp_sock *msk)
+ {
++      const struct sock *sk = (const struct sock *)msk;
+       struct mptcp_pm_addr_entry *entry, *ret = NULL;
+-      struct sock *sk = (struct sock *)msk;
+       msk_owned_by_me(msk);
+@@ -219,16 +219,16 @@ select_signal_address(struct pm_nl_perne
+       return ret;
+ }
+-unsigned int mptcp_pm_get_add_addr_signal_max(struct mptcp_sock *msk)
++unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
+ {
+-      struct pm_nl_pernet *pernet;
++      const struct pm_nl_pernet *pernet;
+-      pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
++      pernet = net_generic(sock_net((const struct sock *)msk), pm_nl_pernet_id);
+       return READ_ONCE(pernet->add_addr_signal_max);
+ }
+ EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
+-unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk)
++unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
+ {
+       struct pm_nl_pernet *pernet;
+@@ -237,7 +237,7 @@ unsigned int mptcp_pm_get_add_addr_accep
+ }
+ EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
+-unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk)
++unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
+ {
+       struct pm_nl_pernet *pernet;
+@@ -246,7 +246,7 @@ unsigned int mptcp_pm_get_subflows_max(s
+ }
+ EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
+-unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk)
++unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
+ {
+       struct pm_nl_pernet *pernet;
+@@ -264,8 +264,8 @@ static void check_work_pending(struct mp
+ }
+ struct mptcp_pm_add_entry *
+-mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
+-                              struct mptcp_addr_info *addr)
++mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
++                              const struct mptcp_addr_info *addr)
+ {
+       struct mptcp_pm_add_entry *entry;
+@@ -346,7 +346,7 @@ out:
+ struct mptcp_pm_add_entry *
+ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+-                     struct mptcp_addr_info *addr, bool check_id)
++                     const struct mptcp_addr_info *addr, bool check_id)
+ {
+       struct mptcp_pm_add_entry *entry;
+       struct sock *sk = (struct sock *)msk;
+@@ -364,7 +364,7 @@ mptcp_pm_del_add_timer(struct mptcp_sock
+ }
+ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+-                                   struct mptcp_pm_addr_entry *entry)
++                                   const struct mptcp_pm_addr_entry *entry)
+ {
+       struct mptcp_pm_add_entry *add_entry = NULL;
+       struct sock *sk = (struct sock *)msk;
+@@ -410,8 +410,8 @@ void mptcp_pm_free_anno_list(struct mptc
+       }
+ }
+-static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
+-                                struct mptcp_addr_info *addr)
++static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
++                                const struct mptcp_addr_info *addr)
+ {
+       int i;
+@@ -1329,7 +1329,7 @@ int mptcp_pm_get_flags_and_ifindex_by_id
+ }
+ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+-                                    struct mptcp_addr_info *addr)
++                                    const struct mptcp_addr_info *addr)
+ {
+       struct mptcp_pm_add_entry *entry;
+@@ -1344,7 +1344,7 @@ static bool remove_anno_list_by_saddr(st
+ }
+ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+-                                    struct mptcp_addr_info *addr,
++                                    const struct mptcp_addr_info *addr,
+                                     bool force)
+ {
+       struct mptcp_rm_list list = { .nr = 0 };
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -738,7 +738,7 @@ void mptcp_pm_subflow_closed(struct mptc
+ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+                               const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+-                            struct mptcp_addr_info *addr);
++                            const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+@@ -752,10 +752,10 @@ void mptcp_pm_free_anno_list(struct mptc
+ bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
+ struct mptcp_pm_add_entry *
+ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+-                     struct mptcp_addr_info *addr, bool check_id);
++                     const struct mptcp_addr_info *addr, bool check_id);
+ struct mptcp_pm_add_entry *
+-mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
+-                              struct mptcp_addr_info *addr);
++mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
++                              const struct mptcp_addr_info *addr);
+ int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
+                                        u8 *flags, int *ifindex);
+@@ -814,7 +814,7 @@ static inline int mptcp_rm_addr_len(cons
+       return TCPOLEN_MPTCP_RM_ADDR_BASE + roundup(rm_list->nr - 1, 4) + 1;
+ }
+-bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
++bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
+                             unsigned int opt_size, unsigned int remaining,
+                             struct mptcp_addr_info *addr, bool *echo,
+                             bool *port, bool *drop_other_suboptions);
+@@ -830,10 +830,10 @@ void mptcp_pm_nl_rm_subflow_received(str
+                                    const struct mptcp_rm_list *rm_list);
+ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+-unsigned int mptcp_pm_get_add_addr_signal_max(struct mptcp_sock *msk);
+-unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
+-unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
+-unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
++unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
++unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
++unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
++unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
+ void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
+ void mptcp_sockopt_sync_all(struct mptcp_sock *msk);
diff --git a/queue-5.15/mptcp-pm-add_addr-0-is-not-a-new-address.patch b/queue-5.15/mptcp-pm-add_addr-0-is-not-a-new-address.patch
new file mode 100644 (file)
index 0000000..458fcd8
--- /dev/null
@@ -0,0 +1,91 @@
+From stable+bounces-73741-greg=kroah.com@vger.kernel.org Fri Sep  6 10:35:13 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:34:55 +0200
+Subject: mptcp: pm: ADD_ADDR 0 is not a new address
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Paolo Abeni <pabeni@redhat.com>
+Message-ID: <20240906083454.1772761-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 57f86203b41c98b322119dfdbb1ec54ce5e3369b upstream.
+
+The ADD_ADDR 0 with the address from the initial subflow should not be
+considered as a new address: this is not something new. If the host
+receives it, it simply means that the address is available again.
+
+When receiving an ADD_ADDR for the ID 0, the PM already doesn't consider
+it as new by not incrementing the 'add_addr_accepted' counter. But the
+'accept_addr' might not be set if the limit has already been reached:
+this can be bypassed in this case. But before, it is important to check
+that this ADD_ADDR for the ID 0 is for the same address as the initial
+subflow. If not, it is not something that should happen, and the
+ADD_ADDR can be ignored.
+
+Note that if an ADD_ADDR is received while there is already a subflow
+opened using the same address, this ADD_ADDR is ignored as well. It
+means that if multiple ADD_ADDR for ID 0 are received, there will not be
+any duplicated subflows created by the client.
+
+Fixes: d0876b2284cf ("mptcp: add the incoming RM_ADDR support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ Conflicts in pm.c, due to commit 4d25247d3ae4 ("mptcp: bypass
+  in-kernel PM restrictions for non-kernel PMs"), which is not in this
+  version, and changes the context. The same fix can be applied here by
+  adding the new check at the same place. Note that addresses_equal()
+  has been used instead of mptcp_addresses_equal(), renamed in commit
+  4638de5aefe5 ("mptcp: handle local addrs announced by userspace PMs"),
+  not in this version. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c         |    4 +++-
+ net/mptcp/pm_netlink.c |    9 +++++++++
+ net/mptcp/protocol.h   |    2 ++
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -189,7 +189,9 @@ void mptcp_pm_add_addr_received(struct m
+       spin_lock_bh(&pm->lock);
+-      if (!READ_ONCE(pm->accept_addr)) {
++      /* id0 should not have a different address */
++      if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
++          (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+               mptcp_pm_announce_addr(msk, addr, true);
+               mptcp_pm_add_addr_send_ack(msk);
+       } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -669,6 +669,15 @@ add_addr_echo:
+       mptcp_pm_nl_addr_send_ack(msk);
+ }
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++                                   const struct mptcp_addr_info *remote)
++{
++      struct mptcp_addr_info mpc_remote;
++
++      remote_address((struct sock_common *)msk, &mpc_remote);
++      return addresses_equal(&mpc_remote, remote, remote->port);
++}
++
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ {
+       struct mptcp_subflow_context *subflow;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -742,6 +742,8 @@ void mptcp_pm_add_addr_received(struct m
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+                             const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++                                   const struct mptcp_addr_info *remote);
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+                              const struct mptcp_rm_list *rm_list);
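
The new mptcp_pm_nl_is_init_remote_addr() helper only compares the announced address with the remote address of the initial subflow, reusing addresses_equal() with the port check made conditional. A self-contained sketch of that kind of comparison (IPv4 only, toy types, not the kernel's mptcp_addr_info):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct toy_addr { uint32_t ip; uint16_t port; };

  /* Same spirit as addresses_equal(): compare the addresses, and the
   * ports only when the caller asks for it. */
  static bool toy_addresses_equal(const struct toy_addr *a,
                                  const struct toy_addr *b, bool use_port)
  {
          if (a->ip != b->ip)
                  return false;
          return !use_port || a->port == b->port;
  }

  int main(void)
  {
          struct toy_addr initial  = { 0x01010101, 443 }; /* 1.1.1.1:443 */
          struct toy_addr announce = { 0x01010101, 0 };   /* ADD_ADDR, no port */

          /* port is 0 in the announce, so only the address is compared */
          printf("%d\n", toy_addresses_equal(&initial, &announce, announce.port));
          return 0;
  }
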
diff --git a/queue-5.15/mptcp-pm-avoid-possible-uaf-when-selecting-endp.patch b/queue-5.15/mptcp-pm-avoid-possible-uaf-when-selecting-endp.patch
new file mode 100644 (file)
index 0000000..58e8862
--- /dev/null
@@ -0,0 +1,156 @@
+From stable+bounces-73735-greg=kroah.com@vger.kernel.org Fri Sep  6 10:32:28 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:31:53 +0200
+Subject: mptcp: pm: avoid possible UaF when selecting endp
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Paolo Abeni <pabeni@redhat.com>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906083151.1768557-4-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 48e50dcbcbaaf713d82bf2da5c16aeced94ad07d upstream.
+
+select_local_address() and select_signal_address() both select an
+endpoint entry from the list inside an RCU protected section, but return
+a reference to it, to be read later on. If the entry is dereferenced
+after the RCU unlock, reading info could cause a Use-after-Free.
+
+A simple solution is to copy the required info while inside the RCU
+protected section to avoid any risk of UaF later. The address ID might
+need to be modified later to handle the ID0 case, so a copy seems
+OK to deal with.
+
+Reported-by: Paolo Abeni <pabeni@redhat.com>
+Closes: https://lore.kernel.org/45cd30d3-7710-491c-ae4d-a1368c00beb1@redhat.com
+Fixes: 01cacb00b35c ("mptcp: add netlink-based PM")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240819-net-mptcp-pm-reusing-id-v1-14-38035d40de5b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflicts in pm_netlink.c, because quite a bit of new code has been
+  added around since commit 86e39e04482b ("mptcp: keep track of local
+  endpoint still available for each msk"). But the issue is still there.
+  The conflicts have been resolved using the same way: by adding a new
+  parameter to select_local_address() and select_signal_address(), and
+  use it instead of the pointer they were previously returning. The code
+  is simpler in this version, this conflict resolution looks safe. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |   47 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 26 insertions(+), 21 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -158,12 +158,14 @@ static bool lookup_subflow_by_daddr(cons
+       return false;
+ }
+-static struct mptcp_pm_addr_entry *
++static bool
+ select_local_address(const struct pm_nl_pernet *pernet,
+-                   struct mptcp_sock *msk)
++                   struct mptcp_sock *msk,
++                   struct mptcp_pm_addr_entry *new_entry)
+ {
+       const struct sock *sk = (const struct sock *)msk;
+-      struct mptcp_pm_addr_entry *entry, *ret = NULL;
++      struct mptcp_pm_addr_entry *entry;
++      bool found = false;
+       msk_owned_by_me(msk);
+@@ -187,18 +189,22 @@ select_local_address(const struct pm_nl_
+                * pending join
+                */
+               if (!lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) {
+-                      ret = entry;
++                      *new_entry = *entry;
++                      found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+-      return ret;
++
++      return found;
+ }
+-static struct mptcp_pm_addr_entry *
+-select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
++static bool
++select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos,
++                    struct mptcp_pm_addr_entry *new_entry)
+ {
+-      struct mptcp_pm_addr_entry *entry, *ret = NULL;
++      struct mptcp_pm_addr_entry *entry;
++      bool found = false;
+       int i = 0;
+       rcu_read_lock();
+@@ -211,12 +217,14 @@ select_signal_address(struct pm_nl_perne
+               if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+                       continue;
+               if (i++ == pos) {
+-                      ret = entry;
++                      *new_entry = *entry;
++                      found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+-      return ret;
++
++      return found;
+ }
+ unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
+@@ -474,7 +482,7 @@ __lookup_addr(struct pm_nl_pernet *perne
+ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ {
+       struct sock *sk = (struct sock *)msk;
+-      struct mptcp_pm_addr_entry *local;
++      struct mptcp_pm_addr_entry local;
+       unsigned int add_addr_signal_max;
+       unsigned int local_addr_max;
+       struct pm_nl_pernet *pernet;
+@@ -493,13 +501,11 @@ static void mptcp_pm_create_subflow_or_s
+       /* check first for announce */
+       if (msk->pm.add_addr_signaled < add_addr_signal_max) {
+-              local = select_signal_address(pernet,
+-                                            msk->pm.add_addr_signaled);
+-
+-              if (local) {
+-                      if (mptcp_pm_alloc_anno_list(msk, local)) {
++              if (select_signal_address(pernet, msk->pm.add_addr_signaled,
++                                        &local)) {
++                      if (mptcp_pm_alloc_anno_list(msk, &local)) {
+                               msk->pm.add_addr_signaled++;
+-                              mptcp_pm_announce_addr(msk, &local->addr, false);
++                              mptcp_pm_announce_addr(msk, &local.addr, false);
+                               mptcp_pm_nl_addr_send_ack(msk);
+                       }
+               } else {
+@@ -514,9 +520,8 @@ static void mptcp_pm_create_subflow_or_s
+       if (msk->pm.local_addr_used < local_addr_max &&
+           msk->pm.subflows < subflows_max &&
+           !READ_ONCE(msk->pm.remote_deny_join_id0)) {
+-              local = select_local_address(pernet, msk);
+-              if (local) {
+-                      bool fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
++              if (select_local_address(pernet, msk, &local)) {
++                      bool fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+                       struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
+                       int i, nr;
+@@ -525,7 +530,7 @@ static void mptcp_pm_create_subflow_or_s
+                       nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
+                       spin_unlock_bh(&msk->pm.lock);
+                       for (i = 0; i < nr; i++)
+-                              __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
++                              __mptcp_subflow_connect(sk, &local.addr, &addrs[i]);
+                       spin_lock_bh(&msk->pm.lock);
+                       return;
+               }
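
The shape of the fix is a calling-convention change: instead of returning a pointer that is only valid inside the RCU read-side section, select_local_address() and select_signal_address() now copy the chosen entry into a caller-provided structure and return a boolean. A minimal sketch of that convention (toy types, no real RCU; in the kernel, rcu_read_lock()/rcu_read_unlock() bracket the lookup):

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_entry { int id; char addr[16]; };

  static const struct toy_entry table[] = {
          { 1, "10.0.0.1" },
          { 2, "10.0.0.2" },
  };

  /* Copy the matching entry out of the protected section rather than
   * returning a pointer into it: the caller only touches its own copy,
   * so nothing is dereferenced after the lookup is "unlocked". */
  static bool select_entry(int wanted_id, struct toy_entry *out)
  {
          for (int i = 0; i < 2; i++) {
                  if (table[i].id == wanted_id) {
                          *out = table[i]; /* copy, not a reference */
                          return true;
                  }
          }
          return false;
  }

  int main(void)
  {
          struct toy_entry e;

          if (select_entry(2, &e))
                  printf("id=%d addr=%s\n", e.id, e.addr);
          return 0;
  }
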
diff --git a/queue-5.15/mptcp-pm-check-add_addr_accept_max-before-accepting-new-add_addr.patch b/queue-5.15/mptcp-pm-check-add_addr_accept_max-before-accepting-new-add_addr.patch
new file mode 100644 (file)
index 0000000..975a899
--- /dev/null
@@ -0,0 +1,44 @@
+From stable+bounces-73732-greg=kroah.com@vger.kernel.org Fri Sep  6 10:31:09 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:30:44 +0200
+Subject: mptcp: pm: check add_addr_accept_max before accepting new ADD_ADDR
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906083043.1767111-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 0137a3c7c2ea3f9df8ebfc65d78b4ba712a187bb upstream.
+
+The limits might have changed in between, it is best to check them
+before accepting new ADD_ADDR.
+
+Fixes: d0876b2284cf ("mptcp: add the incoming RM_ADDR support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240819-net-mptcp-pm-reusing-id-v1-10-38035d40de5b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflicts in pm_netlink.c, because the context is different, but the
+  same lines can still be modified to fix the issue. This is due to
+  commit 322ea3778965 ("mptcp: pm: only mark 'subflow' endp as
+  available") not being backported to this version. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -772,8 +772,8 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       /* Note: if the subflow has been closed before, this
+                        * add_addr_accepted counter will not be decremented.
+                        */
+-                      msk->pm.add_addr_accepted--;
+-                      WRITE_ONCE(msk->pm.accept_addr, true);
++                      if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
++                              WRITE_ONCE(msk->pm.accept_addr, true);
+               } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
+                       msk->pm.local_addr_used--;
+               }
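
The two modified lines change the semantics of the RM_ADDR handling: decrementing add_addr_accepted no longer unconditionally re-enables accept_addr, it only does so if the counter is now below the current limit, which may have been lowered since the addresses were accepted. A compact sketch of the difference (toy names):

  #include <stdbool.h>
  #include <stdio.h>

  /* New behaviour: only start accepting ADD_ADDR again when the counter
   * actually drops below the (possibly lowered) per-netns limit. */
  static bool accept_addr_after_removal(unsigned int *accepted, unsigned int limit)
  {
          return --(*accepted) < limit;
  }

  int main(void)
  {
          unsigned int accepted = 2;

          /* limit lowered to 1 after two addresses were accepted */
          printf("%d\n", accept_addr_after_removal(&accepted, 1)); /* 0: 1 is not below 1 */
          printf("%d\n", accept_addr_after_removal(&accepted, 1)); /* 1: 0 is below 1 */
          return 0;
  }
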
diff --git a/queue-5.15/mptcp-pm-do-not-remove-already-closed-subflows.patch b/queue-5.15/mptcp-pm-do-not-remove-already-closed-subflows.patch
new file mode 100644 (file)
index 0000000..a9c63e3
--- /dev/null
@@ -0,0 +1,43 @@
+From stable+bounces-73739-greg=kroah.com@vger.kernel.org Fri Sep  6 10:34:41 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:34:02 +0200
+Subject: mptcp: pm: do not remove already closed subflows
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Paolo Abeni <pabeni@redhat.com>
+Message-ID: <20240906083401.1771515-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 58e1b66b4e4b8a602d3f2843e8eba00a969ecce2 upstream.
+
+It is possible to have in the list already closed subflows, e.g. the
+initial subflow has been already closed, but still in the list. No need
+to try to close it again, nor increment the related counters again.
+
+Fixes: 0ee4261a3681 ("mptcp: implement mptcp_pm_remove_subflow")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ Conflicts in pm_netlink.c, due to commit 3ad14f54bd74 ("mptcp: more
+  accurate MPC endpoint tracking") which is not in this version, and
+  changes the context. The same fix can be applied here by adding the
+  new check at the same place. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -767,6 +767,9 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+                       u8 id = subflow->local_id;
++                      if (inet_sk_state_load(ssk) == TCP_CLOSE)
++                              continue;
++
+                       if (rm_type == MPTCP_MIB_RMADDR)
+                               id = subflow->remote_id;
diff --git a/queue-5.15/mptcp-pm-fullmesh-select-the-right-id-later.patch b/queue-5.15/mptcp-pm-fullmesh-select-the-right-id-later.patch
new file mode 100644 (file)
index 0000000..e5eba8c
--- /dev/null
@@ -0,0 +1,87 @@
+From stable+bounces-73733-greg=kroah.com@vger.kernel.org Fri Sep  6 10:31:36 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:31:24 +0200
+Subject: mptcp: pm: fullmesh: select the right ID later
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906083123.1767956-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 09355f7abb9fbfc1a240be029837921ea417bf4f upstream.
+
+When reacting upon the reception of an ADD_ADDR, the in-kernel PM first
+looks for fullmesh endpoints. If there are some, it will pick them,
+using their entry ID.
+
+It should set the ID 0 when using the endpoint corresponding to the
+initial subflow, it is a special case imposed by the MPTCP specs.
+
+Note that msk->mpc_endpoint_id might not be set when receiving the first
+ADD_ADDR from the server. So better to compare the addresses.
+
+Fixes: 1a0d6136c5f0 ("mptcp: local addresses fullmesh")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240819-net-mptcp-pm-reusing-id-v1-12-38035d40de5b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflicts in pm_netlink.c, because the new 'mpc_addr' variable is
+  added where the 'local' one was, before commit b9d69db87fb7 ("mptcp:
+  let the in-kernel PM use mixed IPv4 and IPv6 addresses"), that is not
+  a candidate for the backports. This 'local' variable has been moved to
+  the new place to reduce the scope, and help with possible future
+  backports.
+  Note that addresses_equal() has been used instead of
+  mptcp_addresses_equal(), renamed in commit 4638de5aefe5 ("mptcp:
+  handle local addrs announced by userspace PMs"), not in this version. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -554,7 +554,7 @@ static unsigned int fill_local_addresses
+ {
+       struct sock *sk = (struct sock *)msk;
+       struct mptcp_pm_addr_entry *entry;
+-      struct mptcp_addr_info local;
++      struct mptcp_addr_info mpc_addr;
+       struct pm_nl_pernet *pernet;
+       unsigned int subflows_max;
+       int i = 0;
+@@ -562,6 +562,8 @@ static unsigned int fill_local_addresses
+       pernet = net_generic(sock_net(sk), pm_nl_pernet_id);
+       subflows_max = mptcp_pm_get_subflows_max(msk);
++      mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+       rcu_read_lock();
+       __mptcp_flush_join_list(msk);
+       list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+@@ -580,7 +582,13 @@ static unsigned int fill_local_addresses
+               if (msk->pm.subflows < subflows_max) {
+                       msk->pm.subflows++;
+-                      addrs[i++] = entry->addr;
++                      addrs[i] = entry->addr;
++
++                      /* Special case for ID0: set the correct ID */
++                      if (addresses_equal(&entry->addr, &mpc_addr, entry->addr.port))
++                              addrs[i].id = 0;
++
++                      i++;
+               }
+       }
+       rcu_read_unlock();
+@@ -589,6 +597,8 @@ static unsigned int fill_local_addresses
+        * 'IPADDRANY' local address
+        */
+       if (!i) {
++              struct mptcp_addr_info local;
++
+               memset(&local, 0, sizeof(local));
+               local.family = msk->pm.remote.family;
diff --git a/queue-5.15/mptcp-pm-only-decrement-add_addr_accepted-for-mpj-req.patch b/queue-5.15/mptcp-pm-only-decrement-add_addr_accepted-for-mpj-req.patch
new file mode 100644 (file)
index 0000000..f34ca8c
--- /dev/null
@@ -0,0 +1,74 @@
+From stable+bounces-73730-greg=kroah.com@vger.kernel.org Fri Sep  6 10:30:19 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:30:01 +0200
+Subject: mptcp: pm: only decrement add_addr_accepted for MPJ req
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906083000.1766120-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 1c1f721375989579e46741f59523e39ec9b2a9bd upstream.
+
+Adding the following warning ...
+
+  WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)
+
+... before decrementing the add_addr_accepted counter helped to find a
+bug when running the "remove single subflow" subtest from the
+mptcp_join.sh selftest.
+
+Removing a 'subflow' endpoint will first trigger a RM_ADDR, then the
+subflow closure. Before this patch, and upon the reception of the
+RM_ADDR, the other peer will then try to decrement this
+add_addr_accepted. That's not correct because the attached subflows have
+not been created upon the reception of an ADD_ADDR.
+
+A way to solve that is to decrement the counter only if the attached
+subflow was an MP_JOIN to a remote id that was not 0, and initiated by
+the host receiving the RM_ADDR.
+
+Fixes: d0876b2284cf ("mptcp: add the incoming RM_ADDR support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240819-net-mptcp-pm-reusing-id-v1-9-38035d40de5b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflicts in pm_netlink.c, because the context is different, but the
+  same lines can still be modified. The conflicts are due to commit
+  4d25247d3ae4 ("mptcp: bypass in-kernel PM restrictions for non-kernel
+  PMs") and commit a88c9e496937 ("mptcp: do not block subflows creation
+  on errors"), adding new features and not present in this version.
+  Note that because some features to better track subflows are missing
+  in this version, it is required to remove the WARN_ON, because the
+  counter could be 0 in some cases. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -757,7 +757,7 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       mptcp_close_ssk(sk, ssk, subflow);
+                       spin_lock_bh(&msk->pm.lock);
+-                      removed = true;
++                      removed |= subflow->request_join;
+                       msk->pm.subflows--;
+                       if (rm_type == MPTCP_MIB_RMSUBFLOW)
+                               __MPTCP_INC_STATS(sock_net(sk), rm_type);
+@@ -767,7 +767,11 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+               if (!removed)
+                       continue;
+-              if (rm_type == MPTCP_MIB_RMADDR) {
++              if (rm_type == MPTCP_MIB_RMADDR && rm_list->ids[i] &&
++                  msk->pm.add_addr_accepted != 0) {
++                      /* Note: if the subflow has been closed before, this
++                       * add_addr_accepted counter will not be decremented.
++                       */
+                       msk->pm.add_addr_accepted--;
+                       WRITE_ONCE(msk->pm.accept_addr, true);
+               } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
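
Putting the conditions of the hunk together: add_addr_accepted is only decremented for an RM_ADDR carrying a non-zero id, when at least one of the removed subflows was locally initiated as an MP_JOIN (request_join), and when the counter is not already zero. A small sketch of that decision, with toy names and only the RM_ADDR case modelled:

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_pm { unsigned int add_addr_accepted; };

  /* Decrement only for subflows created as MP_JOIN requests in response
   * to an ADD_ADDR, i.e. removal of a non-zero id with request_join set. */
  static void toy_handle_rm_addr(struct toy_pm *pm, bool removed_join_subflow,
                                 unsigned int rm_id)
  {
          if (!removed_join_subflow)
                  return;
          if (rm_id && pm->add_addr_accepted != 0)
                  pm->add_addr_accepted--;
  }

  int main(void)
  {
          struct toy_pm pm = { .add_addr_accepted = 1 };

          toy_handle_rm_addr(&pm, false, 2); /* no MP_JOIN removed: keep 1 */
          toy_handle_rm_addr(&pm, true, 0);  /* id 0: keep 1 */
          toy_handle_rm_addr(&pm, true, 2);  /* MP_JOIN to id 2: now 0 */
          printf("%u\n", pm.add_addr_accepted);
          return 0;
  }
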
diff --git a/queue-5.15/mptcp-pm-re-using-id-of-unused-flushed-subflows.patch b/queue-5.15/mptcp-pm-re-using-id-of-unused-flushed-subflows.patch
new file mode 100644 (file)
index 0000000..ed3440e
--- /dev/null
@@ -0,0 +1,57 @@
+From stable+bounces-73729-greg=kroah.com@vger.kernel.org Fri Sep  6 10:29:13 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:28:54 +0200
+Subject: mptcp: pm: re-using ID of unused flushed subflows
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906082853.1764704-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit ef34a6ea0cab1800f4b3c9c3c2cefd5091e03379 upstream.
+
+If no subflows are attached to the 'subflow' endpoints that are being
+flushed, the corresponding addr IDs will not be marked as available
+again.
+
+Mark all IDs as being available when flushing all the 'subflow'
+endpoints, and reset local_addr_used counter to cover these cases.
+
+Note that mptcp_pm_remove_addrs_and_subflows() helper is only called for
+flushing operations, not to remove a specific set of addresses and
+subflows.
+
+Fixes: 06faa2271034 ("mptcp: remove multi addresses and subflows in PM")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240819-net-mptcp-pm-reusing-id-v1-5-38035d40de5b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ No conflicts, but the line modifying msk->pm.id_avail_bitmap has been
+  removed, as it is not in this version, introduced later in commit
+  86e39e04482b ("mptcp: keep track of local endpoint still available for
+  each msk") and depending on other ones. The best we can do in this
+  version is to reset local_addr_used counter, better than nothing. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1498,8 +1498,14 @@ static void mptcp_pm_remove_addrs_and_su
+               mptcp_pm_remove_addr(msk, &alist);
+               spin_unlock_bh(&msk->pm.lock);
+       }
++
+       if (slist.nr)
+               mptcp_pm_remove_subflow(msk, &slist);
++
++      /* Reset counters: maybe some subflows have been removed before */
++      spin_lock_bh(&msk->pm.lock);
++      msk->pm.local_addr_used = 0;
++      spin_unlock_bh(&msk->pm.lock);
+ }
+ static void mptcp_nl_remove_addrs_list(struct net *net,
diff --git a/queue-5.15/mptcp-pm-send-ack-on-an-active-subflow.patch b/queue-5.15/mptcp-pm-send-ack-on-an-active-subflow.patch
new file mode 100644 (file)
index 0000000..d485aba
--- /dev/null
@@ -0,0 +1,61 @@
+From stable+bounces-73738-greg=kroah.com@vger.kernel.org Fri Sep  6 10:34:03 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:33:35 +0200
+Subject: mptcp: pm: send ACK on an active subflow
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Paolo Abeni <pabeni@redhat.com>
+Message-ID: <20240906083334.1770934-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit c07cc3ed895f9bfe0c53b5ed6be710c133b4271c upstream.
+
+Taking the first one on the list doesn't work in some cases, e.g. if the
+initial subflow is being removed. Pick another one instead of not
+sending anything.
+
+Fixes: 84dfe3677a6f ("mptcp: send out dedicated ADD_ADDR packet")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ Conflicts in pm_netlink.c, because the code has been refactored in
+  commit f5360e9b314c ("mptcp: introduce and use mptcp_pm_send_ack()")
+  which is difficult to backport in this version. The same adaptations
+  have been applied: iterating over all subflows, and send the ACK on
+  the first active subflow. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |   18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -693,16 +693,18 @@ void mptcp_pm_nl_addr_send_ack(struct mp
+               return;
+       __mptcp_flush_join_list(msk);
+-      subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+-      if (subflow) {
+-              struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++      mptcp_for_each_subflow(msk, subflow) {
++              if (__mptcp_subflow_active(subflow)) {
++                      struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-              spin_unlock_bh(&msk->pm.lock);
+-              pr_debug("send ack for %s\n",
+-                       mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr");
++                      spin_unlock_bh(&msk->pm.lock);
++                      pr_debug("send ack for %s\n",
++                               mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr");
+-              mptcp_subflow_send_ack(ssk);
+-              spin_lock_bh(&msk->pm.lock);
++                      mptcp_subflow_send_ack(ssk);
++                      spin_lock_bh(&msk->pm.lock);
++                      break;
++              }
+       }
+ }
diff --git a/queue-5.15/mptcp-pm-skip-connecting-to-already-established-sf.patch b/queue-5.15/mptcp-pm-skip-connecting-to-already-established-sf.patch
new file mode 100644 (file)
index 0000000..9517154
--- /dev/null
@@ -0,0 +1,61 @@
+From stable+bounces-73737-greg=kroah.com@vger.kernel.org Fri Sep  6 10:33:29 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:33:09 +0200
+Subject: mptcp: pm: skip connecting to already established sf
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Paolo Abeni <pabeni@redhat.com>
+Message-ID: <20240906083308.1770314-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit bc19ff57637ff563d2bdf2b385b48c41e6509e0d upstream.
+
+The lookup_subflow_by_daddr() helper checks if there is already a
+subflow connected to this address. But there could be a subflow that is
+closing, but taking time due to some reasons: latency, losses, data to
+process, etc.
+
+If an ADD_ADDR is received while the endpoint is being closed, it is
+better to try connecting to it, instead of rejecting it: the peer which
+has sent the ADD_ADDR will not be notified that the ADD_ADDR has been
+rejected for this reason, and the expected subflow will not be created
+at the end.
+
+This helper should then only look for subflows that are established, or
+going to be, but not the ones being closed.
+
+Fixes: d84ad04941c3 ("mptcp: skip connecting the connected address")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[ Conflicts in pm_netlink.c, due to commit 4638de5aefe5 ("mptcp: handle
+  local addrs announced by userspace PMs"), not in this version, and
+  changing the context. The same fix can still be applied. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -145,12 +145,15 @@ static bool lookup_subflow_by_daddr(cons
+ {
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_addr_info cur;
+-      struct sock_common *skc;
+       list_for_each_entry(subflow, list, node) {
+-              skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
++              struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-              remote_address(skc, &cur);
++              if (!((1 << inet_sk_state_load(ssk)) &
++                    (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
++                      continue;
++
++              remote_address((struct sock_common *)ssk, &cur);
+               if (addresses_equal(&cur, daddr, daddr->port))
+                       return true;
+       }
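
The state filter above uses the common kernel idiom where TCPF_<STATE> equals (1 << TCP_<STATE>), so a single mask test checks whether the subflow is established or still being set up. A self-contained sketch of that idiom (state values mirroring the kernel enum, trimmed to the ones used here):

  #include <stdbool.h>
  #include <stdio.h>

  enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT, TCP_SYN_RECV, TCP_CLOSE_WAIT = 8 };

  #define TCPF_ESTABLISHED (1 << TCP_ESTABLISHED)
  #define TCPF_SYN_SENT    (1 << TCP_SYN_SENT)
  #define TCPF_SYN_RECV    (1 << TCP_SYN_RECV)

  /* Only count subflows that are established or still being set up,
   * matching the filter added to lookup_subflow_by_daddr(). */
  static bool usable_for_daddr_lookup(int state)
  {
          return (1 << state) & (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV);
  }

  int main(void)
  {
          printf("%d\n", usable_for_daddr_lookup(TCP_ESTABLISHED)); /* 1 */
          printf("%d\n", usable_for_daddr_lookup(TCP_CLOSE_WAIT));  /* 0: closing, ignore */
          return 0;
  }
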
diff --git a/queue-5.15/mptcp-pr_debug-add-missing-n-at-the-end.patch b/queue-5.15/mptcp-pr_debug-add-missing-n-at-the-end.patch
new file mode 100644 (file)
index 0000000..0296d85
--- /dev/null
@@ -0,0 +1,980 @@
+From stable+bounces-73736-greg=kroah.com@vger.kernel.org Fri Sep  6 10:32:59 2024
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri,  6 Sep 2024 10:32:43 +0200
+Subject: mptcp: pr_debug: add missing \n at the end
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Geliang Tang <geliang@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20240906083242.1769743-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit cb41b195e634d3f1ecfcd845314e64fd4bb3c7aa upstream.
+
+pr_debug() have been added in various places in MPTCP code to help
+developers to debug some situations. With the dynamic debug feature, it
+is easy to enable all or some of them, and ask users to reproduce
+issues with extra debug.
+
+Many of these pr_debug() don't end with a new line, while no 'pr_cont()'
+are used in MPTCP code. So the goal was not to display multiple debug
+messages on one line: they were then not missing the '\n' on purpose.
+Not having the new line at the end causes these messages to be printed
+with a delay, when something else needs to be printed. This issue is not
+visible when many messages need to be printed, but it is annoying and
+confusing when only specific messages are expected, e.g.
+
+  # echo "func mptcp_pm_add_addr_echoed +fmp" \
+        > /sys/kernel/debug/dynamic_debug/control
+  # ./mptcp_join.sh "signal address"; \
+        echo "$(awk '{print $1}' /proc/uptime) - end"; \
+        sleep 5s; \
+        echo "$(awk '{print $1}' /proc/uptime) - restart"; \
+        ./mptcp_join.sh "signal address"
+  013 signal address
+      (...)
+  10.75 - end
+  15.76 - restart
+  013 signal address
+  [  10.367935] mptcp:mptcp_pm_add_addr_echoed: MPTCP: msk=(...)
+      (...)
+
+  => a delay of 5 seconds: the message carries the 10.36 timestamp, but
+     it is only printed after 'restart', which has the 15.76 timestamp.
+
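+In other words, the change is only in the format string of each message;
+an illustrative snippet (not taken from the tree):
+
+  /* without '\n': held back by printk until another message flushes it */
+  pr_debug("msk=%p", msk);
+
+  /* with '\n': emitted as a complete line right away */
+  pr_debug("msk=%p\n", msk);
+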
+The 'Fixes' tag below points to the first pr_debug() used without '\n'
+in net/mptcp. This patch could be split into many small ones, each with
+its own Fixes tag, but that doesn't seem worth it, because the patch is
+easy to re-generate with this simple 'sed' command:
+
+  git grep -l pr_debug -- net/mptcp |
+    xargs sed -i "s/\(pr_debug(\".*[^n]\)\(\"[,)]\)/\1\\\n\2/g"
+
+So in case of conflicts, simply drop the modifications, and launch this
+command.
+
+Fixes: f870fa0b5768 ("mptcp: Add MPTCP socket stubs")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-4-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ As mentioned above, conflicts were expected; they were resolved by
+  re-running the 'sed' command shown above. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c    |   44 +++++++++++++++++++++----------------------
+ net/mptcp/pm.c         |   28 +++++++++++++--------------
+ net/mptcp/pm_netlink.c |   22 ++++++++++-----------
+ net/mptcp/protocol.c   |   50 ++++++++++++++++++++++++-------------------------
+ net/mptcp/protocol.h   |    4 +--
+ net/mptcp/sockopt.c    |    4 +--
+ net/mptcp/subflow.c    |   48 +++++++++++++++++++++++------------------------
+ 7 files changed, 100 insertions(+), 100 deletions(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -112,7 +112,7 @@ static void mptcp_parse_option(const str
+                       mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+                       ptr += 2;
+               }
+-              pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
++              pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
+                        version, flags, opsize, mp_opt->sndr_key,
+                        mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
+               break;
+@@ -126,7 +126,7 @@ static void mptcp_parse_option(const str
+                       ptr += 4;
+                       mp_opt->nonce = get_unaligned_be32(ptr);
+                       ptr += 4;
+-                      pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
++                      pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
+                                mp_opt->backup, mp_opt->join_id,
+                                mp_opt->token, mp_opt->nonce);
+               } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+@@ -137,19 +137,19 @@ static void mptcp_parse_option(const str
+                       ptr += 8;
+                       mp_opt->nonce = get_unaligned_be32(ptr);
+                       ptr += 4;
+-                      pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
++                      pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+                                mp_opt->backup, mp_opt->join_id,
+                                mp_opt->thmac, mp_opt->nonce);
+               } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+                       mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+                       ptr += 2;
+                       memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+-                      pr_debug("MP_JOIN hmac");
++                      pr_debug("MP_JOIN hmac\n");
+               }
+               break;
+       case MPTCPOPT_DSS:
+-              pr_debug("DSS");
++              pr_debug("DSS\n");
+               ptr++;
+               /* we must clear 'mpc_map' be able to detect MP_CAPABLE
+@@ -164,7 +164,7 @@ static void mptcp_parse_option(const str
+               mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
+               mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
+-              pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
++              pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
+                        mp_opt->data_fin, mp_opt->dsn64,
+                        mp_opt->use_map, mp_opt->ack64,
+                        mp_opt->use_ack);
+@@ -202,7 +202,7 @@ static void mptcp_parse_option(const str
+                               ptr += 4;
+                       }
+-                      pr_debug("data_ack=%llu", mp_opt->data_ack);
++                      pr_debug("data_ack=%llu\n", mp_opt->data_ack);
+               }
+               if (mp_opt->use_map) {
+@@ -226,7 +226,7 @@ static void mptcp_parse_option(const str
+                               ptr += 2;
+                       }
+-                      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++                      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+                                mp_opt->data_seq, mp_opt->subflow_seq,
+                                mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
+                                mp_opt->csum);
+@@ -288,7 +288,7 @@ static void mptcp_parse_option(const str
+                       mp_opt->ahmac = get_unaligned_be64(ptr);
+                       ptr += 8;
+               }
+-              pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
++              pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
+                        (mp_opt->addr.family == AF_INET6) ? "6" : "",
+                        mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
+               break;
+@@ -304,7 +304,7 @@ static void mptcp_parse_option(const str
+               mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
+               for (i = 0; i < mp_opt->rm_list.nr; i++)
+                       mp_opt->rm_list.ids[i] = *ptr++;
+-              pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
++              pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
+               break;
+       case MPTCPOPT_MP_PRIO:
+@@ -313,7 +313,7 @@ static void mptcp_parse_option(const str
+               mp_opt->suboptions |= OPTION_MPTCP_PRIO;
+               mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
+-              pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
++              pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
+               break;
+       case MPTCPOPT_MP_FASTCLOSE:
+@@ -346,7 +346,7 @@ static void mptcp_parse_option(const str
+               ptr += 2;
+               mp_opt->suboptions |= OPTION_MPTCP_FAIL;
+               mp_opt->fail_seq = get_unaligned_be64(ptr);
+-              pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
++              pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
+               break;
+       default:
+@@ -409,7 +409,7 @@ bool mptcp_syn_options(struct sock *sk,
+               *size = TCPOLEN_MPTCP_MPC_SYN;
+               return true;
+       } else if (subflow->request_join) {
+-              pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
++              pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
+                        subflow->local_nonce);
+               opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+               opts->join_id = subflow->local_id;
+@@ -493,7 +493,7 @@ static bool mptcp_established_options_mp
+                       *size = TCPOLEN_MPTCP_MPC_ACK;
+               }
+-              pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
++              pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
+                        subflow, subflow->local_key, subflow->remote_key,
+                        data_len);
+@@ -502,7 +502,7 @@ static bool mptcp_established_options_mp
+               opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+               memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+               *size = TCPOLEN_MPTCP_MPJ_ACK;
+-              pr_debug("subflow=%p", subflow);
++              pr_debug("subflow=%p\n", subflow);
+               /* we can use the full delegate action helper only from BH context
+                * If we are in process context - sk is flushing the backlog at
+@@ -671,7 +671,7 @@ static bool mptcp_established_options_ad
+       *size = len;
+       if (drop_other_suboptions) {
+-              pr_debug("drop other suboptions");
++              pr_debug("drop other suboptions\n");
+               opts->suboptions = 0;
+               /* note that e.g. DSS could have written into the memory
+@@ -688,7 +688,7 @@ static bool mptcp_established_options_ad
+                                                    msk->remote_key,
+                                                    &opts->addr);
+       }
+-      pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
++      pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
+                opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
+       return true;
+@@ -719,7 +719,7 @@ static bool mptcp_established_options_rm
+       opts->rm_list = rm_list;
+       for (i = 0; i < opts->rm_list.nr; i++)
+-              pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
++              pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
+       return true;
+ }
+@@ -747,7 +747,7 @@ static bool mptcp_established_options_mp
+       opts->suboptions |= OPTION_MPTCP_PRIO;
+       opts->backup = subflow->request_bkup;
+-      pr_debug("prio=%d", opts->backup);
++      pr_debug("prio=%d\n", opts->backup);
+       return true;
+ }
+@@ -787,7 +787,7 @@ static bool mptcp_established_options_mp
+       opts->suboptions |= OPTION_MPTCP_FAIL;
+       opts->fail_seq = subflow->map_seq;
+-      pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
++      pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
+       return true;
+ }
+@@ -872,7 +872,7 @@ bool mptcp_synack_options(const struct r
+               opts->csum_reqd = subflow_req->csum_reqd;
+               opts->allow_join_id0 = subflow_req->allow_join_id0;
+               *size = TCPOLEN_MPTCP_MPC_SYNACK;
+-              pr_debug("subflow_req=%p, local_key=%llu",
++              pr_debug("subflow_req=%p, local_key=%llu\n",
+                        subflow_req, subflow_req->local_key);
+               return true;
+       } else if (subflow_req->mp_join) {
+@@ -881,7 +881,7 @@ bool mptcp_synack_options(const struct r
+               opts->join_id = subflow_req->local_id;
+               opts->thmac = subflow_req->thmac;
+               opts->nonce = subflow_req->local_nonce;
+-              pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
++              pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+                        subflow_req, opts->backup, opts->join_id,
+                        opts->thmac, opts->nonce);
+               *size = TCPOLEN_MPTCP_MPJ_SYNACK;
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -20,7 +20,7 @@ int mptcp_pm_announce_addr(struct mptcp_
+ {
+       u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+-      pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
++      pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
+       lockdep_assert_held(&msk->pm.lock);
+@@ -45,7 +45,7 @@ int mptcp_pm_remove_addr(struct mptcp_so
+ {
+       u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+-      pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
++      pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
+       if (rm_addr) {
+               pr_warn("addr_signal error, rm_addr=%d", rm_addr);
+@@ -61,7 +61,7 @@ int mptcp_pm_remove_addr(struct mptcp_so
+ int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
+ {
+-      pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
++      pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
+       spin_lock_bh(&msk->pm.lock);
+       mptcp_pm_nl_rm_subflow_received(msk, rm_list);
+@@ -75,7 +75,7 @@ void mptcp_pm_new_connection(struct mptc
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
++      pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);
+       WRITE_ONCE(pm->server_side, server_side);
+       mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
+@@ -89,7 +89,7 @@ bool mptcp_pm_allow_new_subflow(struct m
+       subflows_max = mptcp_pm_get_subflows_max(msk);
+-      pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
++      pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
+                subflows_max, READ_ONCE(pm->accept_subflow));
+       /* try to avoid acquiring the lock below */
+@@ -113,7 +113,7 @@ bool mptcp_pm_allow_new_subflow(struct m
+ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+                                  enum mptcp_pm_status new_status)
+ {
+-      pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
++      pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
+                BIT(new_status));
+       if (msk->pm.status & BIT(new_status))
+               return false;
+@@ -128,7 +128,7 @@ void mptcp_pm_fully_established(struct m
+       struct mptcp_pm_data *pm = &msk->pm;
+       bool announce = false;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       spin_lock_bh(&pm->lock);
+@@ -152,14 +152,14 @@ void mptcp_pm_fully_established(struct m
+ void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+ {
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+ }
+ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       if (!READ_ONCE(pm->work_pending))
+               return;
+@@ -174,7 +174,7 @@ void mptcp_pm_subflow_established(struct
+ void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
+ {
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+ }
+ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+@@ -182,7 +182,7 @@ void mptcp_pm_add_addr_received(struct m
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
++      pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
+                READ_ONCE(pm->accept_addr));
+       mptcp_event_addr_announced(msk, addr);
+@@ -208,7 +208,7 @@ void mptcp_pm_add_addr_echoed(struct mpt
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       spin_lock_bh(&pm->lock);
+@@ -232,7 +232,7 @@ void mptcp_pm_rm_addr_received(struct mp
+       struct mptcp_pm_data *pm = &msk->pm;
+       u8 i;
+-      pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
++      pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
+       for (i = 0; i < rm_list->nr; i++)
+               mptcp_event_addr_removed(msk, rm_list->ids[i]);
+@@ -257,7 +257,7 @@ void mptcp_pm_mp_prio_received(struct so
+ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
+ {
+-      pr_debug("fail_seq=%llu", fail_seq);
++      pr_debug("fail_seq=%llu\n", fail_seq);
+ }
+ /* path manager helpers */
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -317,7 +317,7 @@ static void mptcp_pm_add_timer(struct ti
+       struct mptcp_sock *msk = entry->sock;
+       struct sock *sk = (struct sock *)msk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       if (!msk)
+               return;
+@@ -336,7 +336,7 @@ static void mptcp_pm_add_timer(struct ti
+       spin_lock_bh(&msk->pm.lock);
+       if (!mptcp_pm_should_add_signal_addr(msk)) {
+-              pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
++              pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
+               mptcp_pm_announce_addr(msk, &entry->addr, false);
+               mptcp_pm_add_addr_send_ack(msk);
+               entry->retrans_times++;
+@@ -409,7 +409,7 @@ void mptcp_pm_free_anno_list(struct mptc
+       struct sock *sk = (struct sock *)msk;
+       LIST_HEAD(free_list);
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       spin_lock_bh(&msk->pm.lock);
+       list_splice_init(&msk->pm.anno_list, &free_list);
+@@ -631,7 +631,7 @@ static void mptcp_pm_nl_add_addr_receive
+       add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+       subflows_max = mptcp_pm_get_subflows_max(msk);
+-      pr_debug("accepted %d:%d remote family %d",
++      pr_debug("accepted %d:%d remote family %d\n",
+                msk->pm.add_addr_accepted, add_addr_accept_max,
+                msk->pm.remote.family);
+@@ -698,7 +698,7 @@ void mptcp_pm_nl_addr_send_ack(struct mp
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               spin_unlock_bh(&msk->pm.lock);
+-              pr_debug("send ack for %s",
++              pr_debug("send ack for %s\n",
+                        mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr");
+               mptcp_subflow_send_ack(ssk);
+@@ -712,7 +712,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct
+ {
+       struct mptcp_subflow_context *subflow;
+-      pr_debug("bkup=%d", bkup);
++      pr_debug("bkup=%d\n", bkup);
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+@@ -730,7 +730,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct
+               __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIOTX);
+               spin_unlock_bh(&msk->pm.lock);
+-              pr_debug("send ack for mp_prio");
++              pr_debug("send ack for mp_prio\n");
+               mptcp_subflow_send_ack(ssk);
+               spin_lock_bh(&msk->pm.lock);
+@@ -748,7 +748,7 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+       struct sock *sk = (struct sock *)msk;
+       u8 i;
+-      pr_debug("%s rm_list_nr %d",
++      pr_debug("%s rm_list_nr %d\n",
+                rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
+       msk_owned_by_me(msk);
+@@ -779,7 +779,7 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       if (rm_list->ids[i] != id)
+                               continue;
+-                      pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u",
++                      pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u\n",
+                                rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+                                i, rm_list->ids[i], subflow->local_id, subflow->remote_id);
+                       spin_unlock_bh(&msk->pm.lock);
+@@ -829,7 +829,7 @@ void mptcp_pm_nl_work(struct mptcp_sock
+       spin_lock_bh(&msk->pm.lock);
+-      pr_debug("msk=%p status=%x", msk, pm->status);
++      pr_debug("msk=%p status=%x\n", msk, pm->status);
+       if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+               pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+               mptcp_pm_nl_add_addr_received(msk);
+@@ -1389,7 +1389,7 @@ static int mptcp_nl_remove_subflow_and_s
+       long s_slot = 0, s_num = 0;
+       struct mptcp_rm_list list = { .nr = 0 };
+-      pr_debug("remove_id=%d", addr->id);
++      pr_debug("remove_id=%d\n", addr->id);
+       list.ids[list.nr++] = addr->id;
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -136,7 +136,7 @@ static bool mptcp_try_coalesce(struct so
+           !skb_try_coalesce(to, from, &fragstolen, &delta))
+               return false;
+-      pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
++      pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
+                MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+                to->len, MPTCP_SKB_CB(from)->end_seq);
+       MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+@@ -170,7 +170,7 @@ static void mptcp_data_queue_ofo(struct
+       end_seq = MPTCP_SKB_CB(skb)->end_seq;
+       max_seq = READ_ONCE(msk->rcv_wnd_sent);
+-      pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
++      pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
+                RB_EMPTY_ROOT(&msk->out_of_order_queue));
+       if (after64(end_seq, max_seq)) {
+               /* out of window */
+@@ -577,7 +577,7 @@ static bool __mptcp_move_skbs_from_subfl
+               }
+       }
+-      pr_debug("msk=%p ssk=%p", msk, ssk);
++      pr_debug("msk=%p ssk=%p\n", msk, ssk);
+       tp = tcp_sk(ssk);
+       do {
+               u32 map_remaining, offset;
+@@ -656,7 +656,7 @@ static bool __mptcp_ofo_queue(struct mpt
+       u64 end_seq;
+       p = rb_first(&msk->out_of_order_queue);
+-      pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
++      pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+       while (p) {
+               skb = rb_to_skb(p);
+               if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+@@ -678,7 +678,7 @@ static bool __mptcp_ofo_queue(struct mpt
+                       int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+                       /* skip overlapping data, if any */
+-                      pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
++                      pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
+                                MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+                                delta);
+                       MPTCP_SKB_CB(skb)->offset += delta;
+@@ -1328,7 +1328,7 @@ static int mptcp_sendmsg_frag(struct soc
+       size_t copy;
+       int i;
+-      pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
++      pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
+                msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+       if (WARN_ON_ONCE(info->sent > info->limit ||
+@@ -1425,7 +1425,7 @@ alloc_skb:
+       mpext->use_map = 1;
+       mpext->dsn64 = 1;
+-      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
++      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
+                mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+                mpext->dsn64);
+@@ -1812,7 +1812,7 @@ static int mptcp_sendmsg(struct sock *sk
+                       if (!msk->first_pending)
+                               WRITE_ONCE(msk->first_pending, dfrag);
+               }
+-              pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
++              pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
+                        dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+                        !dfrag_collapsed);
+@@ -2136,7 +2136,7 @@ static int mptcp_recvmsg(struct sock *sk
+                       }
+               }
+-              pr_debug("block timeout %ld", timeo);
++              pr_debug("block timeout %ld\n", timeo);
+               sk_wait_data(sk, &timeo, NULL);
+       }
+@@ -2146,7 +2146,7 @@ out_err:
+                       tcp_recv_timestamp(msg, sk, &tss);
+       }
+-      pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
++      pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
+                msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+                skb_queue_empty(&msk->receive_queue), copied);
+       if (!(flags & MSG_PEEK))
+@@ -2629,7 +2629,7 @@ void mptcp_subflow_shutdown(struct sock
+               break;
+       default:
+               if (__mptcp_check_fallback(mptcp_sk(sk))) {
+-                      pr_debug("Fallback");
++                      pr_debug("Fallback\n");
+                       ssk->sk_shutdown |= how;
+                       tcp_shutdown(ssk, how);
+@@ -2639,7 +2639,7 @@ void mptcp_subflow_shutdown(struct sock
+                       WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
+                       mptcp_schedule_work(sk);
+               } else {
+-                      pr_debug("Sending DATA_FIN on subflow %p", ssk);
++                      pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
+                       tcp_send_ack(ssk);
+                       if (!mptcp_rtx_timer_pending(sk))
+                               mptcp_reset_rtx_timer(sk);
+@@ -2682,7 +2682,7 @@ static void mptcp_check_send_data_fin(st
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
++      pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
+                msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
+                msk->snd_nxt, msk->write_seq);
+@@ -2707,7 +2707,7 @@ static void __mptcp_wr_shutdown(struct s
+ {
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
++      pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
+                msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
+                !!mptcp_send_head(sk));
+@@ -2724,7 +2724,7 @@ static void __mptcp_destroy_sock(struct
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       LIST_HEAD(conn_list);
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       might_sleep();
+@@ -2797,7 +2797,7 @@ cleanup:
+               inet_sk_state_store(sk, TCP_CLOSE);
+       sock_hold(sk);
+-      pr_debug("msk=%p state=%d", sk, sk->sk_state);
++      pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
+       if (sk->sk_state == TCP_CLOSE) {
+               __mptcp_destroy_sock(sk);
+               do_cancel_work = true;
+@@ -3004,12 +3004,12 @@ static struct sock *mptcp_accept(struct
+               return NULL;
+       }
+-      pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
++      pr_debug("msk=%p, listener=%p\n", msk, mptcp_subflow_ctx(listener->sk));
+       newsk = inet_csk_accept(listener->sk, flags, err, kern);
+       if (!newsk)
+               return NULL;
+-      pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
++      pr_debug("msk=%p, subflow is mptcp=%d\n", msk, sk_is_mptcp(newsk));
+       if (sk_is_mptcp(newsk)) {
+               struct mptcp_subflow_context *subflow;
+               struct sock *new_mptcp_sock;
+@@ -3200,7 +3200,7 @@ static int mptcp_get_port(struct sock *s
+       struct socket *ssock;
+       ssock = __mptcp_nmpc_socket(msk);
+-      pr_debug("msk=%p, subflow=%p", msk, ssock);
++      pr_debug("msk=%p, subflow=%p\n", msk, ssock);
+       if (WARN_ON_ONCE(!ssock))
+               return -EINVAL;
+@@ -3218,7 +3218,7 @@ void mptcp_finish_connect(struct sock *s
+       sk = subflow->conn;
+       msk = mptcp_sk(sk);
+-      pr_debug("msk=%p, token=%u", sk, subflow->token);
++      pr_debug("msk=%p, token=%u\n", sk, subflow->token);
+       mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+       ack_seq++;
+@@ -3259,7 +3259,7 @@ bool mptcp_finish_join(struct sock *ssk)
+       struct socket *parent_sock;
+       bool ret;
+-      pr_debug("msk=%p, subflow=%p", msk, subflow);
++      pr_debug("msk=%p, subflow=%p\n", msk, subflow);
+       /* mptcp socket already closing? */
+       if (!mptcp_is_fully_established(parent)) {
+@@ -3306,7 +3306,7 @@ out:
+ static void mptcp_shutdown(struct sock *sk, int how)
+ {
+-      pr_debug("sk=%p, how=%d", sk, how);
++      pr_debug("sk=%p, how=%d\n", sk, how);
+       if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+               __mptcp_wr_shutdown(sk);
+@@ -3436,7 +3436,7 @@ static int mptcp_listen(struct socket *s
+       struct socket *ssock;
+       int err;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       lock_sock(sock->sk);
+       ssock = __mptcp_nmpc_socket(msk);
+@@ -3466,7 +3466,7 @@ static int mptcp_stream_accept(struct so
+       struct socket *ssock;
+       int err;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       lock_sock(sock->sk);
+       if (sock->sk->sk_state != TCP_LISTEN)
+@@ -3570,7 +3570,7 @@ static __poll_t mptcp_poll(struct file *
+       sock_poll_wait(file, sock, wait);
+       state = inet_sk_state_load(sk);
+-      pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
++      pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
+       if (state == TCP_LISTEN)
+               return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -865,7 +865,7 @@ static inline bool mptcp_check_fallback(
+ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ {
+       if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
+-              pr_debug("TCP fallback already done (msk=%p)", msk);
++              pr_debug("TCP fallback already done (msk=%p)\n", msk);
+               return;
+       }
+       set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+@@ -879,7 +879,7 @@ static inline void mptcp_do_fallback(str
+       __mptcp_do_fallback(msk);
+ }
+-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+ static inline bool subflow_simultaneous_connect(struct sock *sk)
+ {
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -681,7 +681,7 @@ int mptcp_setsockopt(struct sock *sk, in
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       struct sock *ssk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       if (level == SOL_SOCKET)
+               return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+@@ -799,7 +799,7 @@ int mptcp_getsockopt(struct sock *sk, in
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       struct sock *ssk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       /* @@ the meaning of setsockopt() when the socket is connected and
+        * there are multiple subflows is not yet defined. It is up to the
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -39,7 +39,7 @@ static void subflow_req_destructor(struc
+ {
+       struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+-      pr_debug("subflow_req=%p", subflow_req);
++      pr_debug("subflow_req=%p\n", subflow_req);
+       if (subflow_req->msk)
+               sock_put((struct sock *)subflow_req->msk);
+@@ -143,7 +143,7 @@ static int subflow_check_req(struct requ
+       struct mptcp_options_received mp_opt;
+       bool opt_mp_capable, opt_mp_join;
+-      pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
++      pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
+ #ifdef CONFIG_TCP_MD5SIG
+       /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -216,7 +216,7 @@ again:
+               }
+               if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
+-                      pr_debug("syn inet_sport=%d %d",
++                      pr_debug("syn inet_sport=%d %d\n",
+                                ntohs(inet_sk(sk_listener)->inet_sport),
+                                ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
+                       if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
+@@ -235,7 +235,7 @@ again:
+                               return -EPERM;
+               }
+-              pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
++              pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
+                        subflow_req->remote_nonce, subflow_req->msk);
+       }
+@@ -409,7 +409,7 @@ static void subflow_finish_connect(struc
+       subflow->rel_write_seq = 1;
+       subflow->conn_finished = 1;
+       subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+-      pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
++      pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
+       mptcp_get_options(skb, &mp_opt);
+       if (subflow->request_mptcp) {
+@@ -428,7 +428,7 @@ static void subflow_finish_connect(struc
+               subflow->mp_capable = 1;
+               subflow->can_ack = 1;
+               subflow->remote_key = mp_opt.sndr_key;
+-              pr_debug("subflow=%p, remote_key=%llu", subflow,
++              pr_debug("subflow=%p, remote_key=%llu\n", subflow,
+                        subflow->remote_key);
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
+               mptcp_finish_connect(sk);
+@@ -444,7 +444,7 @@ static void subflow_finish_connect(struc
+               subflow->backup = mp_opt.backup;
+               subflow->thmac = mp_opt.thmac;
+               subflow->remote_nonce = mp_opt.nonce;
+-              pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
++              pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
+                        subflow, subflow->thmac, subflow->remote_nonce,
+                        subflow->backup);
+@@ -470,7 +470,7 @@ static void subflow_finish_connect(struc
+                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+               if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
+-                      pr_debug("synack inet_dport=%d %d",
++                      pr_debug("synack inet_dport=%d %d\n",
+                                ntohs(inet_sk(sk)->inet_dport),
+                                ntohs(inet_sk(parent)->inet_dport));
+                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
+@@ -494,7 +494,7 @@ static int subflow_v4_conn_request(struc
+ {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+-      pr_debug("subflow=%p", subflow);
++      pr_debug("subflow=%p\n", subflow);
+       /* Never answer to SYNs sent to broadcast or multicast */
+       if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -525,7 +525,7 @@ static int subflow_v6_conn_request(struc
+ {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+-      pr_debug("subflow=%p", subflow);
++      pr_debug("subflow=%p\n", subflow);
+       if (skb->protocol == htons(ETH_P_IP))
+               return subflow_v4_conn_request(sk, skb);
+@@ -670,7 +670,7 @@ static struct sock *subflow_syn_recv_soc
+       struct sock *new_msk = NULL;
+       struct sock *child;
+-      pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
++      pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
+       /* After child creation we must look for MPC even when options
+        * are not parsed
+@@ -782,7 +782,7 @@ create_child:
+                       ctx->conn = (struct sock *)owner;
+                       if (subflow_use_different_sport(owner, sk)) {
+-                              pr_debug("ack inet_sport=%d %d",
++                              pr_debug("ack inet_sport=%d %d\n",
+                                        ntohs(inet_sk(sk)->inet_sport),
+                                        ntohs(inet_sk((struct sock *)owner)->inet_sport));
+                               if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
+@@ -837,7 +837,7 @@ enum mapping_status {
+ static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-      pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++      pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
+                ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+@@ -1009,7 +1009,7 @@ static enum mapping_status get_mapping_s
+               if (data_len == 1) {
+                       bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
+                                                                mpext->dsn64);
+-                      pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
++                      pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
+                       if (subflow->map_valid) {
+                               /* A DATA_FIN might arrive in a DSS
+                                * option before the previous mapping
+@@ -1034,7 +1034,7 @@ static enum mapping_status get_mapping_s
+                               data_fin_seq &= GENMASK_ULL(31, 0);
+                       mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+-                      pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
++                      pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+                                data_fin_seq, mpext->dsn64);
+               }
+@@ -1081,7 +1081,7 @@ static enum mapping_status get_mapping_s
+       if (unlikely(subflow->map_csum_reqd != csum_reqd))
+               return MAPPING_INVALID;
+-      pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++      pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+                subflow->map_seq, subflow->map_subflow_seq,
+                subflow->map_data_len, subflow->map_csum_reqd,
+                subflow->map_data_csum);
+@@ -1116,7 +1116,7 @@ static void mptcp_subflow_discard_data(s
+       avail_len = skb->len - offset;
+       incr = limit >= avail_len ? avail_len + fin : limit;
+-      pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++      pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+                offset, subflow->map_subflow_seq);
+       MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+       tcp_sk(ssk)->copied_seq += incr;
+@@ -1200,7 +1200,7 @@ static bool subflow_check_data_avail(str
+               old_ack = READ_ONCE(msk->ack_seq);
+               ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+-              pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
++              pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
+                        ack_seq);
+               if (unlikely(before64(ack_seq, old_ack))) {
+                       mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+@@ -1265,7 +1265,7 @@ bool mptcp_subflow_data_available(struct
+               subflow->map_valid = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
+-              pr_debug("Done with mapping: seq=%u data_len=%u",
++              pr_debug("Done with mapping: seq=%u data_len=%u\n",
+                        subflow->map_subflow_seq,
+                        subflow->map_data_len);
+       }
+@@ -1366,7 +1366,7 @@ void mptcpv6_handle_mapped(struct sock *
+       target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
+-      pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
++      pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
+                subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
+       if (likely(icsk->icsk_af_ops == target))
+@@ -1463,7 +1463,7 @@ int __mptcp_subflow_connect(struct sock
+               goto failed;
+       mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+-      pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
++      pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
+                remote_token, local_id, remote_id);
+       subflow->remote_token = remote_token;
+       subflow->local_id = local_id;
+@@ -1588,7 +1588,7 @@ int mptcp_subflow_create_socket(struct s
+       SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+       subflow = mptcp_subflow_ctx(sf->sk);
+-      pr_debug("subflow=%p", subflow);
++      pr_debug("subflow=%p\n", subflow);
+       *new_sock = sf;
+       sock_hold(sk);
+@@ -1612,7 +1612,7 @@ static struct mptcp_subflow_context *sub
+       INIT_LIST_HEAD(&ctx->node);
+       INIT_LIST_HEAD(&ctx->delegated_node);
+-      pr_debug("subflow=%p", ctx);
++      pr_debug("subflow=%p\n", ctx);
+       ctx->tcp_sock = sk;
+@@ -1693,7 +1693,7 @@ static int subflow_ulp_init(struct sock
+               goto out;
+       }
+-      pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
++      pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
+       tp->is_mptcp = 1;
+       ctx->icsk_af_ops = icsk->icsk_af_ops;
index 695ccfe967e56081f0cf290f9f8869334a9a3882..75d50ab5c4ed3372ba678349e4f63b29752edd51 100644 (file)
@@ -88,3 +88,16 @@ ila-call-nf_unregister_net_hooks-sooner.patch
 sched-sch_cake-fix-bulk-flow-accounting-logic-for-host-fairness.patch
 nilfs2-fix-missing-cleanup-on-rollforward-recovery-error.patch
 nilfs2-fix-state-management-in-error-path-of-log-writing-function.patch
+mptcp-pm-re-using-id-of-unused-flushed-subflows.patch
+mptcp-pm-only-decrement-add_addr_accepted-for-mpj-req.patch
+mptcp-pm-check-add_addr_accept_max-before-accepting-new-add_addr.patch
+mptcp-pm-fullmesh-select-the-right-id-later.patch
+mptcp-constify-a-bunch-of-of-helpers.patch
+mptcp-pm-avoid-possible-uaf-when-selecting-endp.patch
+mptcp-avoid-duplicated-sub_closed-events.patch
+mptcp-close-subflow-when-receiving-tcp-fin.patch
+mptcp-pm-add_addr-0-is-not-a-new-address.patch
+mptcp-pm-do-not-remove-already-closed-subflows.patch
+mptcp-pm-skip-connecting-to-already-established-sf.patch
+mptcp-pr_debug-add-missing-n-at-the-end.patch
+mptcp-pm-send-ack-on-an-active-subflow.patch