Fixes for 6.1
author Sasha Levin <sashal@kernel.org>
Sat, 25 Mar 2023 00:41:54 +0000 (20:41 -0400)
committer Sasha Levin <sashal@kernel.org>
Sat, 25 Mar 2023 00:41:54 +0000 (20:41 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.1/drm-amd-display-fix-dp-mst-sinks-removal-issue.patch [new file with mode: 0644]
queue-6.1/mptcp-fix-uaf-in-listener-shutdown.patch [new file with mode: 0644]
queue-6.1/mptcp-use-the-workqueue-to-destroy-unaccepted-socket.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/drm-amd-display-fix-dp-mst-sinks-removal-issue.patch b/queue-6.1/drm-amd-display-fix-dp-mst-sinks-removal-issue.patch
new file mode 100644
index 0000000..881ab2a
--- /dev/null
+++ b/queue-6.1/drm-amd-display-fix-dp-mst-sinks-removal-issue.patch
@@ -0,0 +1,79 @@
+From 71e1bb08553bf507ffa069216c48d26845540738 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Mar 2023 17:08:41 -0500
+Subject: drm/amd/display: Fix DP MST sinks removal issue
+
+From: Cruise Hung <Cruise.Hung@amd.com>
+
+[ Upstream commit cbd6c1b17d3b42b7935526a86ad5f66838767d03 ]
+
+[Why]
+In USB4 DP tunneling, the path can become unavailable and the CM may
+tear the path down a little late. In that case, the HPD is still high
+but every DPCD register read fails. That causes the link connection
+type to be set to SST, and as a result not all sinks behind the MST
+branch are removed.
+
+[How]
+Restore the previous link connection type if the DPCD read fails.
+
+Cc: stable@vger.kernel.org
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Wenjing Liu <Wenjing.Liu@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Cruise Hung <Cruise.Hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit cbd6c1b17d3b42b7935526a86ad5f66838767d03)
+Modified for stable backport as a lot of the code in this file was moved
+in 6.3 to drivers/gpu/drm/amd/display/dc/link/link_detection.c.
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 328c5e33cc66b..bf7fcd268cb47 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1016,6 +1016,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+       struct dc_sink *prev_sink = NULL;
+       struct dpcd_caps prev_dpcd_caps;
+       enum dc_connection_type new_connection_type = dc_connection_none;
++      enum dc_connection_type pre_connection_type = link->type;
+       const uint32_t post_oui_delay = 30; // 30ms
+       DC_LOGGER_INIT(link->ctx->logger);
+@@ -1118,6 +1119,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+                       }
+                       if (!detect_dp(link, &sink_caps, reason)) {
++                              link->type = pre_connection_type;
++
+                               if (prev_sink)
+                                       dc_sink_release(prev_sink);
+                               return false;
+@@ -1349,6 +1352,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+       bool is_delegated_to_mst_top_mgr = false;
+       enum dc_connection_type pre_link_type = link->type;
++      DC_LOGGER_INIT(link->ctx->logger);
++
+       is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
+       if (is_local_sink_detect_success && link->local_sink)
+@@ -1359,6 +1364,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+                       link->dpcd_caps.is_mst_capable)
+               is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
++      DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
++               link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
++
++
+       if (is_local_sink_detect_success &&
+                       pre_link_type == dc_connection_mst_branch &&
+                       link->type != dc_connection_mst_branch)
+-- 
+2.39.2
+
diff --git a/queue-6.1/mptcp-fix-uaf-in-listener-shutdown.patch b/queue-6.1/mptcp-fix-uaf-in-listener-shutdown.patch
new file mode 100644
index 0000000..2bd8be8
--- /dev/null
+++ b/queue-6.1/mptcp-fix-uaf-in-listener-shutdown.patch
@@ -0,0 +1,199 @@
+From fdbcabf2f037f213ff3023adc0e338402c6a3509 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Mar 2023 18:49:02 +0100
+Subject: mptcp: fix UaF in listener shutdown
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 0a3f4f1f9c27215e4ddcd312558342e57b93e518 ]
+
+  Backport notes: one simple conflict in net/mptcp/protocol.c with:
+
+    commit f8c9dfbd875b ("mptcp: add pm listener events")
+
+  Where one commit removes code in __mptcp_close_ssk() while the other
+  one adds one line at the same place. We can simply remove the whole
+  condition because this extra instruction is not present in v6.1.
+
+As Christoph reported after the passive socket initialization
+was refactored, the mptcp listener shutdown path is prone to
+a UaF issue.
+
+  BUG: KASAN: use-after-free in _raw_spin_lock_bh+0x73/0xe0
+  Write of size 4 at addr ffff88810cb23098 by task syz-executor731/1266
+
+  CPU: 1 PID: 1266 Comm: syz-executor731 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
+  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+  Call Trace:
+   <TASK>
+   dump_stack_lvl+0x6e/0x91
+   print_report+0x16a/0x46f
+   kasan_report+0xad/0x130
+   kasan_check_range+0x14a/0x1a0
+   _raw_spin_lock_bh+0x73/0xe0
+   subflow_error_report+0x6d/0x110
+   sk_error_report+0x3b/0x190
+   tcp_disconnect+0x138c/0x1aa0
+   inet_child_forget+0x6f/0x2e0
+   inet_csk_listen_stop+0x209/0x1060
+   __mptcp_close_ssk+0x52d/0x610
+   mptcp_destroy_common+0x165/0x640
+   mptcp_destroy+0x13/0x80
+   __mptcp_destroy_sock+0xe7/0x270
+   __mptcp_close+0x70e/0x9b0
+   mptcp_close+0x2b/0x150
+   inet_release+0xe9/0x1f0
+   __sock_release+0xd2/0x280
+   sock_close+0x15/0x20
+   __fput+0x252/0xa20
+   task_work_run+0x169/0x250
+   exit_to_user_mode_prepare+0x113/0x120
+   syscall_exit_to_user_mode+0x1d/0x40
+   do_syscall_64+0x48/0x90
+   entry_SYSCALL_64_after_hwframe+0x72/0xdc
+
+The msk grace period can legitimately expire between the last
+reference count drop in mptcp_subflow_queue_clean() and the
+later access in inet_csk_listen_stop().
+
+After the previous patch we no longer need to special-case the
+msk listener socket cleanup: the mptcp worker will process each
+of the unaccepted msk sockets.
+
+Just drop the now unnecessary code.
+
+Please note this commit depends on the two parent ones:
+
+  mptcp: refactor passive socket initialization
+  mptcp: use the workqueue to destroy unaccepted sockets
+
+Fixes: 6aeed9045071 ("mptcp: fix race on unaccepted mptcp sockets")
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/346
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c |  5 ---
+ net/mptcp/protocol.h |  1 -
+ net/mptcp/subflow.c  | 72 --------------------------------------------
+ 3 files changed, 78 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b679e8a430a83..f0cde2d7233dc 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2380,11 +2380,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+               mptcp_subflow_drop_ctx(ssk);
+       } else {
+               /* otherwise tcp will dispose of the ssk and subflow ctx */
+-              if (ssk->sk_state == TCP_LISTEN) {
+-                      tcp_set_state(ssk, TCP_CLOSE);
+-                      mptcp_subflow_queue_clean(sk, ssk);
+-                      inet_csk_listen_stop(ssk);
+-              }
+               __tcp_close(ssk, 0);
+               /* close acquired an extra ref */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 2cddd5b52e8fa..051e8022d6611 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -615,7 +615,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                    struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
+-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 459621a0410cd..fc876c2480029 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1764,78 +1764,6 @@ static void subflow_state_change(struct sock *sk)
+       }
+ }
+-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+-{
+-      struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+-      struct mptcp_sock *msk, *next, *head = NULL;
+-      struct request_sock *req;
+-
+-      /* build a list of all unaccepted mptcp sockets */
+-      spin_lock_bh(&queue->rskq_lock);
+-      for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+-              struct mptcp_subflow_context *subflow;
+-              struct sock *ssk = req->sk;
+-              struct mptcp_sock *msk;
+-
+-              if (!sk_is_mptcp(ssk))
+-                      continue;
+-
+-              subflow = mptcp_subflow_ctx(ssk);
+-              if (!subflow || !subflow->conn)
+-                      continue;
+-
+-              /* skip if already in list */
+-              msk = mptcp_sk(subflow->conn);
+-              if (msk->dl_next || msk == head)
+-                      continue;
+-
+-              msk->dl_next = head;
+-              head = msk;
+-      }
+-      spin_unlock_bh(&queue->rskq_lock);
+-      if (!head)
+-              return;
+-
+-      /* can't acquire the msk socket lock under the subflow one,
+-       * or will cause ABBA deadlock
+-       */
+-      release_sock(listener_ssk);
+-
+-      for (msk = head; msk; msk = next) {
+-              struct sock *sk = (struct sock *)msk;
+-              bool do_cancel_work;
+-
+-              lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+-              next = msk->dl_next;
+-              msk->first = NULL;
+-              msk->dl_next = NULL;
+-
+-              do_cancel_work = __mptcp_close(sk, 0);
+-              release_sock(sk);
+-              if (do_cancel_work) {
+-                      /* lockdep will report a false positive ABBA deadlock
+-                       * between cancel_work_sync and the listener socket.
+-                       * The involved locks belong to different sockets WRT
+-                       * the existing AB chain.
+-                       * Using a per socket key is problematic as key
+-                       * deregistration requires process context and must be
+-                       * performed at socket disposal time, in atomic
+-                       * context.
+-                       * Just tell lockdep to consider the listener socket
+-                       * released here.
+-                       */
+-                      mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+-                      mptcp_cancel_work(sk);
+-                      mutex_acquire(&listener_sk->sk_lock.dep_map,
+-                                    SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+-              }
+-              sock_put(sk);
+-      }
+-
+-      /* we are still under the listener msk socket lock */
+-      lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+-}
+-
+ static int subflow_ulp_init(struct sock *sk)
+ {
+       struct inet_connection_sock *icsk = inet_csk(sk);
+-- 
+2.39.2
+
diff --git a/queue-6.1/mptcp-use-the-workqueue-to-destroy-unaccepted-socket.patch b/queue-6.1/mptcp-use-the-workqueue-to-destroy-unaccepted-socket.patch
new file mode 100644
index 0000000..b6f2c4e
--- /dev/null
+++ b/queue-6.1/mptcp-use-the-workqueue-to-destroy-unaccepted-socket.patch
@@ -0,0 +1,284 @@
+From e58ab1eda2048d580144039e539cb870965a4b44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Mar 2023 18:49:01 +0100
+Subject: mptcp: use the workqueue to destroy unaccepted sockets
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit b6985b9b82954caa53f862d6059d06c0526254f0 ]
+
+  Backport notes: one simple conflict in net/mptcp/protocol.c with:
+
+    commit a5ef058dc4d9 ("net: introduce and use custom sockopt socket flag")
+
+  Where the two commits add a new line for different actions in the same
+  context in mptcp_stream_accept().
+
+Christoph reported a UaF at token lookup time after having
+refactored the passive socket initialization part:
+
+  BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
+  Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
+
+  CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
+  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+  Call Trace:
+   <TASK>
+   dump_stack_lvl+0x6e/0x91
+   print_report+0x16a/0x46f
+   kasan_report+0xad/0x130
+   __token_bucket_busy+0x253/0x260
+   mptcp_token_new_connect+0x13d/0x490
+   mptcp_connect+0x4ed/0x860
+   __inet_stream_connect+0x80e/0xd90
+   tcp_sendmsg_fastopen+0x3ce/0x710
+   mptcp_sendmsg+0xff1/0x1a20
+   inet_sendmsg+0x11d/0x140
+   __sys_sendto+0x405/0x490
+   __x64_sys_sendto+0xdc/0x1b0
+   do_syscall_64+0x3b/0x90
+   entry_SYSCALL_64_after_hwframe+0x72/0xdc
+
+We need to properly clean up all the paired MPTCP-level
+resources and be sure to release the msk last, even when
+the unaccepted subflow is destroyed by the TCP internals
+via inet_child_forget().
+
+We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infra,
+explicitly checking for the critical scenario: the closed
+subflow is the MPC one, the msk is not accepted, and the msk
+eventually goes through full cleanup.
+
+With this change, __mptcp_destroy_sock() is always called
+on msk sockets, even on accepted ones, and we no longer
+need to transiently drop one sk reference at msk clone time.
+
+Please note this commit depends on the parent one:
+
+  mptcp: refactor passive socket initialization
+
+Fixes: 58b09919626b ("mptcp: create msk early")
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/347
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 41 +++++++++++++++++++++++++++++++----------
+ net/mptcp/protocol.h |  5 ++++-
+ net/mptcp/subflow.c  | 17 ++++++++++++-----
+ 3 files changed, 47 insertions(+), 16 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 777f795246ed2..b679e8a430a83 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2357,7 +2357,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+               goto out;
+       }
+-      sock_orphan(ssk);
+       subflow->disposable = 1;
+       /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
+@@ -2365,7 +2364,20 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+        * reference owned by msk;
+        */
+       if (!inet_csk(ssk)->icsk_ulp_ops) {
++              WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
+               kfree_rcu(subflow, rcu);
++      } else if (msk->in_accept_queue && msk->first == ssk) {
++              /* if the first subflow moved to a close state, e.g. due to
++               * incoming reset and we reach here before inet_child_forget()
++               * the TCP stack could later try to close it via
++               * inet_csk_listen_stop(), or deliver it to the user space via
++               * accept().
++               * We can't delete the subflow - or risk a double free - nor let
++               * the msk survive - or will be leaked in the non accept scenario:
++               * fallback and let TCP cope with the subflow cleanup.
++               */
++              WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
++              mptcp_subflow_drop_ctx(ssk);
+       } else {
+               /* otherwise tcp will dispose of the ssk and subflow ctx */
+               if (ssk->sk_state == TCP_LISTEN) {
+@@ -2412,9 +2424,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+       return 0;
+ }
+-static void __mptcp_close_subflow(struct mptcp_sock *msk)
++static void __mptcp_close_subflow(struct sock *sk)
+ {
+       struct mptcp_subflow_context *subflow, *tmp;
++      struct mptcp_sock *msk = mptcp_sk(sk);
+       might_sleep();
+@@ -2428,7 +2441,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
+               if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
+                       continue;
+-              mptcp_close_ssk((struct sock *)msk, ssk, subflow);
++              mptcp_close_ssk(sk, ssk, subflow);
++      }
++
++      /* if the MPC subflow has been closed before the msk is accepted,
++       * msk will never be accept-ed, close it now
++       */
++      if (!msk->first && msk->in_accept_queue) {
++              sock_set_flag(sk, SOCK_DEAD);
++              inet_sk_state_store(sk, TCP_CLOSE);
+       }
+ }
+@@ -2637,6 +2658,9 @@ static void mptcp_worker(struct work_struct *work)
+       __mptcp_check_send_data_fin(sk);
+       mptcp_check_data_fin(sk);
++      if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++              __mptcp_close_subflow(sk);
++
+       /* There is no point in keeping around an orphaned sk timedout or
+        * closed, but we need the msk around to reply to incoming DATA_FIN,
+        * even if it is orphaned and in FIN_WAIT2 state
+@@ -2652,9 +2676,6 @@ static void mptcp_worker(struct work_struct *work)
+               }
+       }
+-      if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+-              __mptcp_close_subflow(msk);
+-
+       if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+               __mptcp_retrans(sk);
+@@ -3084,6 +3105,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+       msk->local_key = subflow_req->local_key;
+       msk->token = subflow_req->token;
+       msk->subflow = NULL;
++      msk->in_accept_queue = 1;
+       WRITE_ONCE(msk->fully_established, false);
+       if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
+               WRITE_ONCE(msk->csum_enabled, true);
+@@ -3110,8 +3132,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+       security_inet_csk_clone(nsk, req);
+       bh_unlock_sock(nsk);
+-      /* keep a single reference */
+-      __sock_put(nsk);
++      /* note: the newly allocated socket refcount is 2 now */
+       return nsk;
+ }
+@@ -3167,8 +3188,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+                       goto out;
+               }
+-              /* acquire the 2nd reference for the owning socket */
+-              sock_hold(new_mptcp_sock);
+               newsk = new_mptcp_sock;
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+       } else {
+@@ -3726,6 +3745,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+               struct mptcp_subflow_context *subflow;
+               struct sock *newsk = newsock->sk;
++              msk->in_accept_queue = 0;
++
+               lock_sock(newsk);
+               /* set ssk->sk_socket of accept()ed flows to mptcp socket.
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 6f22ae13c9848..2cddd5b52e8fa 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -286,7 +286,8 @@ struct mptcp_sock {
+       u8              recvmsg_inq:1,
+                       cork:1,
+                       nodelay:1,
+-                      fastopening:1;
++                      fastopening:1,
++                      in_accept_queue:1;
+       int             connect_flags;
+       struct work_struct work;
+       struct sk_buff  *ooo_last_skb;
+@@ -651,6 +652,8 @@ void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
+ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
++void mptcp_subflow_drop_ctx(struct sock *ssk);
++
+ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
+                                             struct mptcp_subflow_context *ctx)
+ {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index fe815103060c6..459621a0410cd 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -636,9 +636,10 @@ static bool subflow_hmac_valid(const struct request_sock *req,
+ static void mptcp_force_close(struct sock *sk)
+ {
+-      /* the msk is not yet exposed to user-space */
++      /* the msk is not yet exposed to user-space, and refcount is 2 */
+       inet_sk_state_store(sk, TCP_CLOSE);
+       sk_common_release(sk);
++      sock_put(sk);
+ }
+ static void subflow_ulp_fallback(struct sock *sk,
+@@ -654,7 +655,7 @@ static void subflow_ulp_fallback(struct sock *sk,
+       mptcp_subflow_ops_undo_override(sk);
+ }
+-static void subflow_drop_ctx(struct sock *ssk)
++void mptcp_subflow_drop_ctx(struct sock *ssk)
+ {
+       struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
+@@ -758,7 +759,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+                       if (new_msk)
+                               mptcp_copy_inaddrs(new_msk, child);
+-                      subflow_drop_ctx(child);
++                      mptcp_subflow_drop_ctx(child);
+                       goto out;
+               }
+@@ -849,7 +850,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+       return child;
+ dispose_child:
+-      subflow_drop_ctx(child);
++      mptcp_subflow_drop_ctx(child);
+       tcp_rsk(req)->drop_req = true;
+       inet_csk_prepare_for_destroy_sock(child);
+       tcp_done(child);
+@@ -1804,7 +1805,6 @@ void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_s
+               struct sock *sk = (struct sock *)msk;
+               bool do_cancel_work;
+-              sock_hold(sk);
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+               next = msk->dl_next;
+               msk->first = NULL;
+@@ -1892,6 +1892,13 @@ static void subflow_ulp_release(struct sock *ssk)
+                * when the subflow is still unaccepted
+                */
+               release = ctx->disposable || list_empty(&ctx->node);
++
++              /* inet_child_forget() does not call sk_state_change(),
++               * explicitly trigger the socket close machinery
++               */
++              if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
++                                                &mptcp_sk(sk)->flags))
++                      mptcp_schedule_work(sk);
+               sock_put(sk);
+       }
+-- 
+2.39.2
+
diff --git a/queue-6.1/series b/queue-6.1/series
index fa4e98791a56c9452abfa3d040d0443a87715c50..b4e7b682304a9b7bdb4c959a374dcc8af38c3c04 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -8,3 +8,6 @@ drm-amd-display-include-virtual-signal-to-set-k1-and.patch
 drm-amd-display-fix-k1-k2-divider-programming-for-ph.patch
 drm-amd-display-remove-otg-div-register-write-for-vi.patch
 mptcp-refactor-passive-socket-initialization.patch
+mptcp-use-the-workqueue-to-destroy-unaccepted-socket.patch
+mptcp-fix-uaf-in-listener-shutdown.patch
+drm-amd-display-fix-dp-mst-sinks-removal-issue.patch