--- /dev/null
+From stable+bounces-192065-greg=kroah.com@vger.kernel.org Mon Nov 3 00:03:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 10:03:32 -0500
+Subject: cpuidle: governors: menu: Rearrange main loop in menu_select()
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Christian Loehle <christian.loehle@arm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102150333.3466275-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 17224c1d2574d29668c4879e1fbf36d6f68cd22b ]
+
+Reduce the indentation level in the main loop of menu_select() by
+rearranging some checks and assignments in it.
+
+No intentional functional impact.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Christian Loehle <christian.loehle@arm.com>
+Link: https://patch.msgid.link/2389215.ElGaqSPkdT@rafael.j.wysocki
+Stable-dep-of: db86f55bf81a ("cpuidle: governors: menu: Select polling state in some more cases")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpuidle/governors/menu.c | 70 ++++++++++++++++++++-------------------
+ 1 file changed, 36 insertions(+), 34 deletions(-)
+
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -311,45 +311,47 @@ static int menu_select(struct cpuidle_dr
+ if (s->exit_latency_ns > latency_req)
+ break;
+
+- if (s->target_residency_ns > predicted_ns) {
+- /*
+- * Use a physical idle state, not busy polling, unless
+- * a timer is going to trigger soon enough.
+- */
+- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+- s->target_residency_ns <= data->next_timer_ns) {
+- predicted_ns = s->target_residency_ns;
+- idx = i;
+- break;
+- }
+- if (predicted_ns < TICK_NSEC)
+- break;
+-
+- if (!tick_nohz_tick_stopped()) {
+- /*
+- * If the state selected so far is shallow,
+- * waking up early won't hurt, so retain the
+- * tick in that case and let the governor run
+- * again in the next iteration of the loop.
+- */
+- predicted_ns = drv->states[idx].target_residency_ns;
+- break;
+- }
++ if (s->target_residency_ns <= predicted_ns) {
++ idx = i;
++ continue;
++ }
++
++ /*
++ * Use a physical idle state, not busy polling, unless a timer
++ * is going to trigger soon enough.
++ */
++ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
++ s->target_residency_ns <= data->next_timer_ns) {
++ predicted_ns = s->target_residency_ns;
++ idx = i;
++ break;
++ }
+
++ if (predicted_ns < TICK_NSEC)
++ break;
++
++ if (!tick_nohz_tick_stopped()) {
+ /*
+- * If the state selected so far is shallow and this
+- * state's target residency matches the time till the
+- * closest timer event, select this one to avoid getting
+- * stuck in the shallow one for too long.
++ * If the state selected so far is shallow, waking up
++ * early won't hurt, so retain the tick in that case and
++ * let the governor run again in the next iteration of
++ * the idle loop.
+ */
+- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+- s->target_residency_ns <= delta_tick)
+- idx = i;
+-
+- return idx;
++ predicted_ns = drv->states[idx].target_residency_ns;
++ break;
+ }
+
+- idx = i;
++ /*
++ * If the state selected so far is shallow and this state's
++ * target residency matches the time till the closest timer
++ * event, select this one to avoid getting stuck in the shallow
++ * one for too long.
++ */
++ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
++ s->target_residency_ns <= delta_tick)
++ idx = i;
++
++ return idx;
+ }
+
+ if (idx == -1)
--- /dev/null
+From stable+bounces-192066-greg=kroah.com@vger.kernel.org Mon Nov 3 00:03:41 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 10:03:33 -0500
+Subject: cpuidle: governors: menu: Select polling state in some more cases
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Doug Smythies <dsmythies@telus.net>, Christian Loehle <christian.loehle@arm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102150333.3466275-2-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit db86f55bf81a3a297be05ee8775ae9a8c6e3a599 ]
+
+A throughput regression of 11% introduced by commit 779b1a1cb13a ("cpuidle:
+governors: menu: Avoid selecting states with too much latency") has been
+reported, and it is related to the case in which the menu governor checks
+whether selecting a proper idle state instead of a polling one makes sense.
+
+In particular, it is questionable to do so if the exit latency of the
+idle state in question exceeds the predicted idle duration, so add a
+check for that, which is sufficient to make the reported regression go
+away, and update the related code comment accordingly.
+
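+For illustration only, a minimal userspace model of the amended
+selection rule; all names and types below are simplified stand-ins
+rather than the kernel's data structures:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  struct state {
+      uint64_t target_residency_ns;
+      uint64_t exit_latency_ns;
+      bool polling;
+  };
+
+  /* Replace a selected polling state with a physical candidate only
+   * when a timer fires soon enough AND the candidate's exit latency
+   * does not exceed the predicted idle duration (the added check). */
+  static bool prefer_physical(const struct state *cur,
+                              const struct state *cand,
+                              uint64_t predicted_ns,
+                              uint64_t next_timer_ns)
+  {
+      return cur->polling &&
+             cand->target_residency_ns <= next_timer_ns &&
+             cand->exit_latency_ns <= predicted_ns;
+  }
+
+  int main(void)
+  {
+      struct state poll = { 0, 0, true };
+      struct state cand = { 500, 2000, false };
+
+      /* predicted 1000 ns, next timer 10000 ns away: the candidate's
+       * 2000 ns exit latency exceeds the prediction, so keep polling. */
+      printf("%d\n", prefer_physical(&poll, &cand, 1000, 10000));
+      return 0;
+  }
+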
+Fixes: 779b1a1cb13a ("cpuidle: governors: menu: Avoid selecting states with too much latency")
+Closes: https://lore.kernel.org/linux-pm/004501dc43c9$ec8aa930$c59ffb90$@telus.net/
+Reported-by: Doug Smythies <dsmythies@telus.net>
+Tested-by: Doug Smythies <dsmythies@telus.net>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Christian Loehle <christian.loehle@arm.com>
+Link: https://patch.msgid.link/12786727.O9o76ZdvQC@rafael.j.wysocki
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpuidle/governors/menu.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -318,10 +318,13 @@ static int menu_select(struct cpuidle_dr
+
+ /*
+ * Use a physical idle state, not busy polling, unless a timer
+- * is going to trigger soon enough.
++ * is going to trigger soon enough or the exit latency of the
++ * idle state in question is greater than the predicted idle
++ * duration.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+- s->target_residency_ns <= data->next_timer_ns) {
++ s->target_residency_ns <= data->next_timer_ns &&
++ s->exit_latency_ns <= predicted_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
--- /dev/null
+From stable+bounces-192085-greg=kroah.com@vger.kernel.org Mon Nov 3 05:14:56 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 15:14:46 -0500
+Subject: mptcp: fix MSG_PEEK stream corruption
+To: stable@vger.kernel.org
+Cc: Paolo Abeni <pabeni@redhat.com>, Geliang Tang <geliang@kernel.org>, Mat Martineau <martineau@kernel.org>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102201446.3587034-2-sashal@kernel.org>
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 8e04ce45a8db7a080220e86e249198fa676b83dc ]
+
+If a MSG_PEEK | MSG_WAITALL read operation consumes all the bytes in the
+receive queue and recvmsg() needs to wait for more data - i.e. it's a
+blocking one - upon arrival of the next packet the MPTCP protocol will
+again start copying from the oldest data present in the receive queue,
+corrupting the data stream.
+
+Address the issue by explicitly tracking the peeked sequence number
+and restarting from the last peeked byte.
+
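+As an illustration (not part of this change), the triggering pattern
+is roughly the following userspace fragment; `fd' is assumed to be a
+connected, blocking MPTCP socket and error handling is trimmed:
+
+  char buf[4096];
+  ssize_t peeked, consumed;
+
+  /* Block until sizeof(buf) bytes can be peeked.  If the peek drains
+   * the receive queue and then waits, the pre-fix code would restart
+   * copying from the oldest queued data once more bytes arrived. */
+  peeked = recv(fd, buf, sizeof(buf), MSG_PEEK | MSG_WAITALL);
+
+  /* Consume the same bytes; both reads should return identical data. */
+  consumed = recv(fd, buf, sizeof(buf), MSG_WAITALL);
+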
+Fixes: ca4fb892579f ("mptcp: add MSG_PEEK support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Tested-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251028-net-mptcp-send-timeout-v1-2-38ffff5a9ec8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 38 +++++++++++++++++++++++++-------------
+ 1 file changed, 25 insertions(+), 13 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1892,22 +1892,36 @@ do_error:
+
+ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
+
+-static int __mptcp_recvmsg_mskq(struct sock *sk,
+- struct msghdr *msg,
+- size_t len, int flags,
++static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
++ size_t len, int flags, int copied_total,
+ struct scm_timestamping_internal *tss,
+ int *cmsg_flags)
+ {
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *skb, *tmp;
++ int total_data_len = 0;
+ int copied = 0;
+
+ skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
+- u32 offset = MPTCP_SKB_CB(skb)->offset;
++ u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
+ u32 data_len = skb->len - offset;
+- u32 count = min_t(size_t, len - copied, data_len);
++ u32 count;
+ int err;
+
++ if (flags & MSG_PEEK) {
++ /* skip already peeked skbs */
++ if (total_data_len + data_len <= copied_total) {
++ total_data_len += data_len;
++ continue;
++ }
++
++ /* skip the already peeked data in the current skb */
++ delta = copied_total - total_data_len;
++ offset += delta;
++ data_len -= delta;
++ }
++
++ count = min_t(size_t, len - copied, data_len);
+ if (!(flags & MSG_TRUNC)) {
+ err = skb_copy_datagram_msg(skb, offset, msg, count);
+ if (unlikely(err < 0)) {
+@@ -1924,16 +1938,14 @@ static int __mptcp_recvmsg_mskq(struct s
+
+ copied += count;
+
+- if (count < data_len) {
+- if (!(flags & MSG_PEEK)) {
++ if (!(flags & MSG_PEEK)) {
++ msk->bytes_consumed += count;
++ if (count < data_len) {
+ MPTCP_SKB_CB(skb)->offset += count;
+ MPTCP_SKB_CB(skb)->map_seq += count;
+- msk->bytes_consumed += count;
++ break;
+ }
+- break;
+- }
+
+- if (!(flags & MSG_PEEK)) {
+ /* avoid the indirect call, we know the destructor is sock_rfree */
+ skb->destructor = NULL;
+ skb->sk = NULL;
+@@ -1941,7 +1953,6 @@ static int __mptcp_recvmsg_mskq(struct s
+ sk_mem_uncharge(sk, skb->truesize);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ skb_attempt_defer_free(skb);
+- msk->bytes_consumed += count;
+ }
+
+ if (copied >= len)
+@@ -2164,7 +2175,8 @@ static int mptcp_recvmsg(struct sock *sk
+ while (copied < len) {
+ int err, bytes_read;
+
+- bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags);
++ bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
++ copied, &tss, &cmsg_flags);
+ if (unlikely(bytes_read < 0)) {
+ if (!copied)
+ copied = bytes_read;
--- /dev/null
+From stable+bounces-192084-greg=kroah.com@vger.kernel.org Mon Nov 3 05:14:56 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 15:14:45 -0500
+Subject: mptcp: leverage skb deferral free
+To: stable@vger.kernel.org
+Cc: Paolo Abeni <pabeni@redhat.com>, Geliang Tang <geliang@kernel.org>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102201446.3587034-1-sashal@kernel.org>
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 9aa59323f2709370cb4f01acbba599a9167f317b ]
+
+Usage of the skb deferral API is straightforward; with multiple
+subflows active, this allows moving part of the receive processing
+load onto multiple CPUs, as skb_attempt_defer_free() hands each skb
+back to the CPU it was allocated on for freeing.
+
+Also fix a typo in the related comment.
+
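+To illustrate the deferral idea, a loose userspace analogy (not part
+of this change): consumers push buffers onto the allocator's free
+list instead of freeing them, so the free cost lands back on the
+allocating side. This model is single-threaded; the kernel's per-CPU
+machinery is more involved:
+
+  #include <stdlib.h>
+
+  struct node { struct node *next; };
+
+  /* One deferred-free list per allocating side; consumers push freed
+   * buffers here instead of calling free() themselves. */
+  struct defer_list { struct node *head; };
+
+  static void defer_free(struct defer_list *owner, struct node *n)
+  {
+      n->next = owner->head;
+      owner->head = n;
+  }
+
+  static void drain(struct defer_list *owner)
+  {
+      while (owner->head) {
+          struct node *n = owner->head;
+          owner->head = n->next;
+          free(n);
+      }
+  }
+
+  int main(void)
+  {
+      struct defer_list dl = { 0 };
+
+      defer_free(&dl, malloc(sizeof(struct node)));  /* consumer side */
+      drain(&dl);                                    /* allocator side */
+      return 0;
+  }
+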
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Tested-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250927-net-next-mptcp-rcv-path-imp-v1-1-5da266aa9c1a@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 8e04ce45a8db ("mptcp: fix MSG_PEEK stream corruption")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1934,12 +1934,13 @@ static int __mptcp_recvmsg_mskq(struct s
+ }
+
+ if (!(flags & MSG_PEEK)) {
+- /* avoid the indirect call, we know the destructor is sock_wfree */
++ /* avoid the indirect call, we know the destructor is sock_rfree */
+ skb->destructor = NULL;
++ skb->sk = NULL;
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_uncharge(sk, skb->truesize);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+- __kfree_skb(skb);
++ skb_attempt_defer_free(skb);
+ msk->bytes_consumed += count;
+ }
+
--- /dev/null
+From stable+bounces-192058-greg=kroah.com@vger.kernel.org Sun Nov 2 23:26:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 09:25:45 -0500
+Subject: PM: hibernate: Combine return paths in power_down()
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102142546.3442128-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 1f5bcfe91ffce71bdd1022648b9d501d46d20c09 ]
+
+To avoid code duplication and improve clarity, combine the code
+paths in power_down() leading to a return from that function.
+
+No intentional functional impact.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Link: https://patch.msgid.link/3571055.QJadu78ljV@rafael.j.wysocki
+[ rjw: Changed the new label name to "exit" ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 35e4a69b2003 ("PM: sleep: Allow pm_restrict_gfp_mask() stacking")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/hibernate.c | 32 ++++++++++++++------------------
+ 1 file changed, 14 insertions(+), 18 deletions(-)
+
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -708,21 +708,11 @@ static void power_down(void)
+ if (hibernation_mode == HIBERNATION_SUSPEND) {
+ pm_restore_gfp_mask();
+ error = suspend_devices_and_enter(mem_sleep_current);
+- if (error) {
+- hibernation_mode = hibernation_ops ?
+- HIBERNATION_PLATFORM :
+- HIBERNATION_SHUTDOWN;
+- } else {
+- /* Match pm_restore_gfp_mask() call in hibernate() */
+- pm_restrict_gfp_mask();
++ if (!error)
++ goto exit;
+
+- /* Restore swap signature. */
+- error = swsusp_unmark();
+- if (error)
+- pr_err("Swap will be unusable! Try swapon -a.\n");
+-
+- return;
+- }
++ hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM :
++ HIBERNATION_SHUTDOWN;
+ }
+ #endif
+
+@@ -733,12 +723,9 @@ static void power_down(void)
+ case HIBERNATION_PLATFORM:
+ error = hibernation_platform_enter();
+ if (error == -EAGAIN || error == -EBUSY) {
+- /* Match pm_restore_gfp_mask() in hibernate(). */
+- pm_restrict_gfp_mask();
+- swsusp_unmark();
+ events_check_enabled = false;
+ pr_info("Wakeup event detected during hibernation, rolling back.\n");
+- return;
++ goto exit;
+ }
+ fallthrough;
+ case HIBERNATION_SHUTDOWN:
+@@ -757,6 +744,15 @@ static void power_down(void)
+ pr_crit("Power down manually\n");
+ while (1)
+ cpu_relax();
++
++exit:
++ /* Match the pm_restore_gfp_mask() call in hibernate(). */
++ pm_restrict_gfp_mask();
++
++ /* Restore swap signature. */
++ error = swsusp_unmark();
++ if (error)
++ pr_err("Swap will be unusable! Try swapon -a.\n");
+ }
+
+ static int load_image_and_restore(void)
--- /dev/null
+From stable+bounces-192059-greg=kroah.com@vger.kernel.org Sun Nov 2 23:26:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 09:25:46 -0500
+Subject: PM: sleep: Allow pm_restrict_gfp_mask() stacking
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Askar Safin <safinaskar@gmail.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102142546.3442128-2-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 35e4a69b2003f20a69e7d19ae96ab1eef1aa8e8d ]
+
+Allow pm_restrict_gfp_mask() to be called many times in a row to avoid
+issues with calling dpm_suspend_start() when the GFP mask has already
+been restricted.
+
+Only the first invocation of pm_restrict_gfp_mask() will actually
+restrict the GFP mask, and the subsequent calls will warn if there is
+a mismatch between the expected allowed GFP mask and the actual one.
+
+Moreover, if pm_restrict_gfp_mask() is called many times in a row,
+pm_restore_gfp_mask() needs to be called a matching number of times in
+a row to actually restore the GFP mask. Calling it when the GFP mask
+has not been restricted will cause it to warn.
+
+This is necessary for the GFP mask restriction starting in
+hibernation_snapshot() to continue throughout the entire hibernation
+flow until it completes or it is aborted (either by a wakeup event or
+by an error).
+
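+To illustrate, the scheme behaves like a depth counter around a
+single saved value; a minimal userspace model, with illustrative
+names rather than the kernel's:
+
+  #include <assert.h>
+
+  static unsigned int saved_count;
+  static unsigned int saved_mask;
+  static unsigned int allowed_mask = ~0u;
+
+  static void restrict_mask(unsigned int bits)
+  {
+      if (saved_count++)          /* already restricted, just count */
+          return;
+      saved_mask = allowed_mask;
+      allowed_mask &= ~bits;
+  }
+
+  static void restore_mask(void)
+  {
+      if (!saved_count || --saved_count)
+          return;                 /* unbalanced, or not the last caller */
+      allowed_mask = saved_mask;
+      saved_mask = 0;
+  }
+
+  int main(void)
+  {
+      restrict_mask(0x3);         /* first call saves and restricts */
+      restrict_mask(0x3);         /* nested call only bumps the count */
+      restore_mask();             /* mask still restricted here... */
+      restore_mask();             /* ...and restored only now */
+      assert(allowed_mask == ~0u);
+      return 0;
+  }
+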
+Fixes: 449c9c02537a1 ("PM: hibernate: Restrict GFP mask in hibernation_snapshot()")
+Fixes: 469d80a3712c ("PM: hibernate: Fix hybrid-sleep")
+Reported-by: Askar Safin <safinaskar@gmail.com>
+Closes: https://lore.kernel.org/linux-pm/20251025050812.421905-1-safinaskar@gmail.com/
+Link: https://lore.kernel.org/linux-pm/20251028111730.2261404-1-safinaskar@gmail.com/
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Tested-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Cc: 6.16+ <stable@vger.kernel.org> # 6.16+
+Link: https://patch.msgid.link/5935682.DvuYhMxLoT@rafael.j.wysocki
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/hibernate.c | 4 ----
+ kernel/power/main.c | 22 +++++++++++++++++-----
+ 2 files changed, 17 insertions(+), 9 deletions(-)
+
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -706,7 +706,6 @@ static void power_down(void)
+
+ #ifdef CONFIG_SUSPEND
+ if (hibernation_mode == HIBERNATION_SUSPEND) {
+- pm_restore_gfp_mask();
+ error = suspend_devices_and_enter(mem_sleep_current);
+ if (!error)
+ goto exit;
+@@ -746,9 +745,6 @@ static void power_down(void)
+ cpu_relax();
+
+ exit:
+- /* Match the pm_restore_gfp_mask() call in hibernate(). */
+- pm_restrict_gfp_mask();
+-
+ /* Restore swap signature. */
+ error = swsusp_unmark();
+ if (error)
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -31,23 +31,35 @@
+ * held, unless the suspend/hibernate code is guaranteed not to run in parallel
+ * with that modification).
+ */
++static unsigned int saved_gfp_count;
+ static gfp_t saved_gfp_mask;
+
+ void pm_restore_gfp_mask(void)
+ {
+ WARN_ON(!mutex_is_locked(&system_transition_mutex));
+- if (saved_gfp_mask) {
+- gfp_allowed_mask = saved_gfp_mask;
+- saved_gfp_mask = 0;
+- }
++
++ if (WARN_ON(!saved_gfp_count) || --saved_gfp_count)
++ return;
++
++ gfp_allowed_mask = saved_gfp_mask;
++ saved_gfp_mask = 0;
++
++ pm_pr_dbg("GFP mask restored\n");
+ }
+
+ void pm_restrict_gfp_mask(void)
+ {
+ WARN_ON(!mutex_is_locked(&system_transition_mutex));
+- WARN_ON(saved_gfp_mask);
++
++ if (saved_gfp_count++) {
++ WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask);
++ return;
++ }
++
+ saved_gfp_mask = gfp_allowed_mask;
+ gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
++
++ pm_pr_dbg("GFP mask restricted\n");
+ }
+
+ unsigned int lock_system_sleep(void)
drm-amd-display-fix-incorrect-return-of-vblank-enable-on-unconfigured-crtc.patch
drm-amd-display-don-t-program-blndgam_mem_pwr_force-when-cm-low-power-is-disabled-on-dcn30.patch
drm-amd-display-add-hdr-workaround-for-a-specific-edp.patch
+mptcp-leverage-skb-deferral-free.patch
+mptcp-fix-msg_peek-stream-corruption.patch
+cpuidle-governors-menu-rearrange-main-loop-in-menu_select.patch
+cpuidle-governors-menu-select-polling-state-in-some-more-cases.patch
+pm-hibernate-combine-return-paths-in-power_down.patch
+pm-sleep-allow-pm_restrict_gfp_mask-stacking.patch