6.17-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 24 Nov 2025 13:58:49 +0000 (14:58 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 24 Nov 2025 13:58:49 +0000 (14:58 +0100)
added patches:
drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch
drm-amd-display-fix-pbn-to-kbps-conversion.patch
drm-amd-display-increase-dpcd-read-retries.patch
drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch
drm-amd-skip-power-ungate-during-suspend-for-vpe.patch
drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch
drm-plane-fix-create_in_format_blob-return-value.patch
drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch
drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch
mptcp-avoid-unneeded-subflow-level-drops.patch
mptcp-decouple-mptcp-fastclose-from-tcp-close.patch
mptcp-do-not-fallback-when-ooo-is-present.patch
mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch
mptcp-fix-ack-generation-for-fallback-msk.patch
mptcp-fix-duplicate-reset-on-fastclose.patch
mptcp-fix-premature-close-in-case-of-fallback.patch
selftests-mptcp-join-endpoints-longer-timeout.patch
selftests-mptcp-join-userspace-longer-timeout.patch

19 files changed:
queue-6.17/drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch [new file with mode: 0644]
queue-6.17/drm-amd-display-fix-pbn-to-kbps-conversion.patch [new file with mode: 0644]
queue-6.17/drm-amd-display-increase-dpcd-read-retries.patch [new file with mode: 0644]
queue-6.17/drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch [new file with mode: 0644]
queue-6.17/drm-amd-skip-power-ungate-during-suspend-for-vpe.patch [new file with mode: 0644]
queue-6.17/drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch [new file with mode: 0644]
queue-6.17/drm-plane-fix-create_in_format_blob-return-value.patch [new file with mode: 0644]
queue-6.17/drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch [new file with mode: 0644]
queue-6.17/drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch [new file with mode: 0644]
queue-6.17/mptcp-avoid-unneeded-subflow-level-drops.patch [new file with mode: 0644]
queue-6.17/mptcp-decouple-mptcp-fastclose-from-tcp-close.patch [new file with mode: 0644]
queue-6.17/mptcp-do-not-fallback-when-ooo-is-present.patch [new file with mode: 0644]
queue-6.17/mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch [new file with mode: 0644]
queue-6.17/mptcp-fix-ack-generation-for-fallback-msk.patch [new file with mode: 0644]
queue-6.17/mptcp-fix-duplicate-reset-on-fastclose.patch [new file with mode: 0644]
queue-6.17/mptcp-fix-premature-close-in-case-of-fallback.patch [new file with mode: 0644]
queue-6.17/selftests-mptcp-join-endpoints-longer-timeout.patch [new file with mode: 0644]
queue-6.17/selftests-mptcp-join-userspace-longer-timeout.patch [new file with mode: 0644]
queue-6.17/series

diff --git a/queue-6.17/drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch b/queue-6.17/drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch
new file mode 100644 (file)
index 0000000..4c2fc23
--- /dev/null
@@ -0,0 +1,49 @@
+From 5bab4c89390f32b2f491f49a151948cd226dd909 Mon Sep 17 00:00:00 2001
+From: Ivan Lipski <ivan.lipski@amd.com>
+Date: Wed, 5 Nov 2025 15:27:42 -0500
+Subject: drm/amd/display: Clear the CUR_ENABLE register on DCN20 on DPP5
+
+From: Ivan Lipski <ivan.lipski@amd.com>
+
+commit 5bab4c89390f32b2f491f49a151948cd226dd909 upstream.
+
+[Why]
+On DCN20 & DCN30, the 6th DPPs & HUBPs are powered on permanently and
+cannot be power gated. Thus, when dpp_reset() is invoked for DPP5
+while it is still powered on, the cached cursor state
+(dpp_base->pos.cur0_ctl.bits.cur0_enable)
+and the actual state (the CUR0_ENABLE bit) become unsynced. This can cause a
+double cursor in full screen with non-native scaling.
+
+[How]
+Force disable cursor on DPP5 on plane powerdown for ASICs w/ 6 DPPs/HUBPs.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4673
+Reviewed-by: Aric Cyr <aric.cyr@amd.com>
+Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 79b3c037f972dcb13e325a8eabfb8da835764e15)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
+                *              DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
+                *              1, 1000);
+                */
++
++              /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
++              if (!power_on) {
++                      struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
++                      if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
++                              dpp5->funcs->dpp_force_disable_cursor(dpp5);
++              }
++
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
diff --git a/queue-6.17/drm-amd-display-fix-pbn-to-kbps-conversion.patch b/queue-6.17/drm-amd-display-fix-pbn-to-kbps-conversion.patch
new file mode 100644 (file)
index 0000000..451a834
--- /dev/null
@@ -0,0 +1,193 @@
+From 1788ef30725da53face7e311cdf62ad65fababcd Mon Sep 17 00:00:00 2001
+From: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Date: Fri, 7 Nov 2025 15:01:30 -0500
+Subject: drm/amd/display: Fix pbn to kbps Conversion
+
+From: Fangzhi Zuo <Jerry.Zuo@amd.com>
+
+commit 1788ef30725da53face7e311cdf62ad65fababcd upstream.
+
+[Why]
+The existing routines offer two conversions,
+pbn_to_kbps and kbps_to_pbn, both with margin.
+Neither of them provides a without-margin calculation.
+
+The with-margin kbps_to_pbn conversion includes
+the fec overhead, which has already been included in
+the pbn_div calculation via the 0.994 factor.
+This double-counted fec overhead factor causes
+potential bw loss.
+
+[How]
+Add a without-margin calculation.
+Fix the double-counted fec overhead issue.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3735
+Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Fangzhi Zuo <Jerry.Zuo@amd.com>
+Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit e0dec00f3d05e8c0eceaaebfdca217f8d10d380c)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c |   59 ++++--------
+ 1 file changed, 23 insertions(+), 36 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -852,26 +852,28 @@ struct dsc_mst_fairness_params {
+ };
+ #if defined(CONFIG_DRM_AMD_DC_FP)
+-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
++static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
+ {
+-      u8 link_coding_cap;
+-      uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
++      uint64_t effective_kbps = (uint64_t)kbps;
+-      link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+-      if (link_coding_cap == DP_128b_132b_ENCODING)
+-              fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
++      if (is_peak_pbn) {      // add 0.6% (1006/1000) overhead into effective kbps
++              effective_kbps *= 1006;
++              effective_kbps = div_u64(effective_kbps, 1000);
++      }
+-      return fec_overhead_multiplier_x1000;
++      return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
+ }
+-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
++static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
+ {
+-      u64 peak_kbps = kbps;
++      uint64_t pbn_effective = (uint64_t)pbn;
++
++      if (with_margin)        // deduct 0.6% (994/1000) overhead from effective pbn
++              pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
++      else
++              pbn_effective *= 1000;
+-      peak_kbps *= 1006;
+-      peak_kbps *= fec_overhead_multiplier_x1000;
+-      peak_kbps = div_u64(peak_kbps, 1000 * 1000);
+-      return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
++      return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
+ }
+ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
+@@ -942,7 +944,7 @@ static int bpp_x16_from_pbn(struct dsc_m
+       dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
+       dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
+-      kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
++      kbps = pbn_to_kbps(pbn, false);
+       dc_dsc_compute_config(
+                       param.sink->ctx->dc->res_pool->dscs[0],
+                       &param.sink->dsc_caps.dsc_dec_caps,
+@@ -971,12 +973,11 @@ static int increase_dsc_bpp(struct drm_a
+       int link_timeslots_used;
+       int fair_pbn_alloc;
+       int ret = 0;
+-      uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+       for (i = 0; i < count; i++) {
+               if (vars[i + k].dsc_enabled) {
+                       initial_slack[i] =
+-                      kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
++                      kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
+                       bpp_increased[i] = false;
+                       remaining_to_increase += 1;
+               } else {
+@@ -1072,7 +1073,6 @@ static int try_disable_dsc(struct drm_at
+       int next_index;
+       int remaining_to_try = 0;
+       int ret;
+-      uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+       int var_pbn;
+       for (i = 0; i < count; i++) {
+@@ -1105,7 +1105,7 @@ static int try_disable_dsc(struct drm_at
+               DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
+               var_pbn = vars[next_index].pbn;
+-              vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
++              vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
+               ret = drm_dp_atomic_find_time_slots(state,
+                                                   params[next_index].port->mgr,
+                                                   params[next_index].port,
+@@ -1165,7 +1165,6 @@ static int compute_mst_dsc_configs_for_l
+       int count = 0;
+       int i, k, ret;
+       bool debugfs_overwrite = false;
+-      uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+       struct drm_connector_state *new_conn_state;
+       memset(params, 0, sizeof(params));
+@@ -1246,7 +1245,7 @@ static int compute_mst_dsc_configs_for_l
+       DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
+       for (i = 0; i < count; i++) {
+               vars[i + k].aconnector = params[i].aconnector;
+-              vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
++              vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
+               vars[i + k].dsc_enabled = false;
+               vars[i + k].bpp_x16 = 0;
+               ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+@@ -1268,7 +1267,7 @@ static int compute_mst_dsc_configs_for_l
+       DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
+       for (i = 0; i < count; i++) {
+               if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
+-                      vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
++                      vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
+                       vars[i + k].dsc_enabled = true;
+                       vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+                       ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+@@ -1276,7 +1275,7 @@ static int compute_mst_dsc_configs_for_l
+                       if (ret < 0)
+                               return ret;
+               } else {
+-                      vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
++                      vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
+                       vars[i + k].dsc_enabled = false;
+                       vars[i + k].bpp_x16 = 0;
+                       ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+@@ -1731,18 +1730,6 @@ clean_exit:
+       return ret;
+ }
+-static uint32_t kbps_from_pbn(unsigned int pbn)
+-{
+-      uint64_t kbps = (uint64_t)pbn;
+-
+-      kbps *= (1000000 / PEAK_FACTOR_X1000);
+-      kbps *= 8;
+-      kbps *= 54;
+-      kbps /= 64;
+-
+-      return (uint32_t)kbps;
+-}
+-
+ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+                                         struct dc_dsc_bw_range *bw_range)
+ {
+@@ -1835,7 +1822,7 @@ enum dc_status dm_dp_mst_is_port_support
+                       dc_link_get_highest_encoding_format(stream->link));
+       cur_link_settings = stream->link->verified_link_cap;
+       root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
+-      virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
++      virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
+       /* pick the end to end bw bottleneck */
+       end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+@@ -1886,7 +1873,7 @@ enum dc_status dm_dp_mst_is_port_support
+                               immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
+                       if (immediate_upstream_port) {
+-                              virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
++                              virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
+                               virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
+                       } else {
+                               /* For topology LCT 1 case - only one mstb*/
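The new conversion math can be sanity-checked outside the kernel. The sketch
below is a standalone userspace mirror of the patch's arithmetic; the helper
names, the DIV_ROUND_UP_U64 macro and the example figures are illustrative
and not part of the driver:

```c
/* Userspace sketch of the patch's PBN <-> kbps math (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_U64(n, d)	(((n) + (d) - 1) / (d))

/* pbn = ceil(kbps * 64 / (54 * 8 * 1000)); peak adds the 0.6% margin */
static uint64_t sketch_kbps_to_pbn(uint64_t kbps, int is_peak)
{
	uint64_t effective_kbps = kbps;

	if (is_peak)
		effective_kbps = effective_kbps * 1006 / 1000;

	return DIV_ROUND_UP_U64(effective_kbps * 64, (uint64_t)54 * 8 * 1000);
}

/* with_margin mirrors PEAK_FACTOR_X1000 == 1006: 1000000/1006 ~= 994 */
static uint64_t sketch_pbn_to_kbps(uint64_t pbn, int with_margin)
{
	uint64_t pbn_effective = pbn * (with_margin ? 1000000 / 1006 : 1000);

	return DIV_ROUND_UP_U64(pbn_effective * 8 * 54, 64);
}

int main(void)
{
	/* e.g. a 1 Gbps stream */
	printf("no margin: %llu PBN\n",
	       (unsigned long long)sketch_kbps_to_pbn(1000000, 0));
	printf("peak:      %llu PBN\n",
	       (unsigned long long)sketch_kbps_to_pbn(1000000, 1));
	printf("149 PBN -> %llu kbps\n",
	       (unsigned long long)sketch_pbn_to_kbps(149, 0));
	return 0;
}
```

For a 1 Gbps stream this prints 149 PBN without margin and 150 PBN with the
0.6% peak margin; the two only differ when the extra margin pushes the
ceil-division across an integer boundary.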
diff --git a/queue-6.17/drm-amd-display-increase-dpcd-read-retries.patch b/queue-6.17/drm-amd-display-increase-dpcd-read-retries.patch
new file mode 100644 (file)
index 0000000..877c14a
--- /dev/null
@@ -0,0 +1,41 @@
+From 8612badc331bcab2068baefa69e1458085ed89e3 Mon Sep 17 00:00:00 2001
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+Date: Mon, 3 Nov 2025 12:11:31 -0600
+Subject: drm/amd/display: Increase DPCD read retries
+
+From: Mario Limonciello (AMD) <superm1@kernel.org>
+
+commit 8612badc331bcab2068baefa69e1458085ed89e3 upstream.
+
+[Why]
+Empirical measurement of some monitors that fail to read EDID while
+booting shows that the number of retries with a 30ms delay between
+tries is as high as 16.
+
+[How]
+Increase number of retries to 20.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4672
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit ad1c59ad7cf74ec06e32fe2c330ac1e957222288)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -1691,7 +1691,7 @@ static bool retrieve_link_cap(struct dc_
+       union edp_configuration_cap edp_config_cap;
+       union dp_downstream_port_present ds_port = { 0 };
+       enum dc_status status = DC_ERROR_UNEXPECTED;
+-      uint32_t read_dpcd_retry_cnt = 3;
++      uint32_t read_dpcd_retry_cnt = 20;
+       int i;
+       struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+       const uint32_t post_oui_delay = 30; // 30ms
diff --git a/queue-6.17/drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch b/queue-6.17/drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch
new file mode 100644 (file)
index 0000000..4115558
--- /dev/null
@@ -0,0 +1,56 @@
+From 71ad9054c1f241be63f9d11df8cbd0aa0352fe16 Mon Sep 17 00:00:00 2001
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+Date: Mon, 3 Nov 2025 11:17:44 -0600
+Subject: drm/amd/display: Move sleep into each retry for retrieve_link_cap()
+
+From: Mario Limonciello (AMD) <superm1@kernel.org>
+
+commit 71ad9054c1f241be63f9d11df8cbd0aa0352fe16 upstream.
+
+[Why]
+When a monitor is booting, it's possible that it isn't ready to retrieve
+link caps, and this can lead to an EDID read failure:
+
+```
+[drm:retrieve_link_cap [amdgpu]] *ERROR* retrieve_link_cap: Read receiver caps dpcd data failed.
+amdgpu 0000:c5:00.0: [drm] *ERROR* No EDID read.
+```
+
+[How]
+Rather than msleep once and then try a few times, msleep before each try.
+There should be no change for existing working monitors, but this should fix
+reading caps on a monitor that is slow to boot.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4672
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Ivan Lipski <ivan.lipski@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 669dca37b3348a447db04bbdcbb3def94d5997cc)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -1734,12 +1734,13 @@ static bool retrieve_link_cap(struct dc_
+       }
+       dpcd_set_source_specific_data(link);
+-      /* Sink may need to configure internals based on vendor, so allow some
+-       * time before proceeding with possibly vendor specific transactions
+-       */
+-      msleep(post_oui_delay);
+       for (i = 0; i < read_dpcd_retry_cnt; i++) {
++              /*
++               * Sink may need to configure internals based on vendor, so allow some
++               * time before proceeding with possibly vendor specific transactions
++               */
++              msleep(post_oui_delay);
+               status = core_link_read_dpcd(
+                               link,
+                               DP_DPCD_REV,
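For reference, combining this with the retry-count bump in the previous
patch (numbers taken from the commit messages, not re-measured here): every
attempt is now preceded by the 30 ms post_oui_delay, so 20 attempts bound the
total wait at roughly 20 x 30 ms = 600 ms, comfortably above the
~16 x 30 ms = 480 ms the slowest observed monitors needed, while the old
code slept only once before its 3 attempts.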
diff --git a/queue-6.17/drm-amd-skip-power-ungate-during-suspend-for-vpe.patch b/queue-6.17/drm-amd-skip-power-ungate-during-suspend-for-vpe.patch
new file mode 100644 (file)
index 0000000..8c34025
--- /dev/null
@@ -0,0 +1,42 @@
+From 31ab31433c9bd2f255c48dc6cb9a99845c58b1e4 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Tue, 18 Nov 2025 07:18:10 -0600
+Subject: drm/amd: Skip power ungate during suspend for VPE
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 31ab31433c9bd2f255c48dc6cb9a99845c58b1e4 upstream.
+
+During the suspend sequence, VPE is already going to be power gated
+as part of vpe_suspend().  It's unnecessary to ungate it again during
+calls to amdgpu_device_set_pg_state().
+
+Doing so can actually expose a race condition with the firmware if the
+s0i3 sequence starts as well.  Drop these calls.
+
+Cc: Peyton.Lee@amd.com
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 2a6c826cfeedd7714611ac115371a959ead55bda)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3360,10 +3360,11 @@ int amdgpu_device_set_pg_state(struct am
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
+-              /* skip CG for VCE/UVD, it's handled specially */
++              /* skip CG for VCE/UVD/VPE, it's handled specially */
+               if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+                   adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+                   adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
++                  adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
+                   adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
+                   adev->ip_blocks[i].version->funcs->set_powergating_state) {
+                       /* enable powergating to save power */
diff --git a/queue-6.17/drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch b/queue-6.17/drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch
new file mode 100644 (file)
index 0000000..84a07f8
--- /dev/null
@@ -0,0 +1,43 @@
+From 80d8a9ad1587b64c545d515ab6cb7ecb9908e1b3 Mon Sep 17 00:00:00 2001
+From: Yifan Zha <Yifan.Zha@amd.com>
+Date: Fri, 14 Nov 2025 17:48:58 +0800
+Subject: drm/amdgpu: Skip emit de meta data on gfx11 with rs64 enabled
+
+From: Yifan Zha <Yifan.Zha@amd.com>
+
+commit 80d8a9ad1587b64c545d515ab6cb7ecb9908e1b3 upstream.
+
+[Why]
+According to CP, which was updated to RS64 on gfx11,
+WRITE_DATA with PREEMPTION_META_MEMORY (dst_sel=8) is illegal for the CP FW.
+That packet is used for MCBP on F32-based systems.
+It would thus lead to an incorrect GRBM write, and the FW does not handle that
+extra case correctly.
+
+[How]
+With gfx11 rs64 enabled, skip emitting the de meta data.
+
+Signed-off-by: Yifan Zha <Yifan.Zha@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 8366cd442d226463e673bed5d199df916f4ecbcf)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -5874,9 +5874,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(s
+               if (flags & AMDGPU_IB_PREEMPTED)
+                       control |= INDIRECT_BUFFER_PRE_RESUME(1);
+-              if (vmid)
++              if (vmid && !ring->adev->gfx.rs64_enable)
+                       gfx_v11_0_ring_emit_de_meta(ring,
+-                                  (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
++                              !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
+       }
+       amdgpu_ring_write(ring, header);
diff --git a/queue-6.17/drm-plane-fix-create_in_format_blob-return-value.patch b/queue-6.17/drm-plane-fix-create_in_format_blob-return-value.patch
new file mode 100644 (file)
index 0000000..60f9bda
--- /dev/null
@@ -0,0 +1,53 @@
+From cead55e24cf9e092890cf51c0548eccd7569defa Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Thu, 13 Nov 2025 01:30:28 +0200
+Subject: drm/plane: Fix create_in_format_blob() return value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit cead55e24cf9e092890cf51c0548eccd7569defa upstream.
+
+create_in_format_blob() is supposed to return either a valid
+pointer or an error, but never NULL. The caller will dereference
+the blob when it is not an error, and thus will oops if NULL is
+returned. Return proper error values in the failure cases.
+
+Cc: stable@vger.kernel.org
+Cc: Arun R Murthy <arun.r.murthy@intel.com>
+Fixes: 0d6dcd741c26 ("drm/plane: modify create_in_formats to acommodate async")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patch.msgid.link/20251112233030.24117-2-ville.syrjala@linux.intel.com
+Reviewed-by: Arun R Murthy <arun.r.murthy@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_plane.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 38f82391bfda..a30493ed9715 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+       formats_size = sizeof(__u32) * plane->format_count;
+       if (WARN_ON(!formats_size)) {
+               /* 0 formats are never expected */
+-              return 0;
++              return ERR_PTR(-EINVAL);
+       }
+       modifiers_size =
+@@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+       blob = drm_property_create_blob(dev, blob_size, NULL);
+       if (IS_ERR(blob))
+-              return NULL;
++              return blob;
+       blob_data = blob->data;
+       blob_data->version = FORMAT_BLOB_CURRENT;
+-- 
+2.52.0
+
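For context, the convention being restored is the usual <linux/err.h> one:
callers filter only error pointers and dereference anything else. A hedged,
illustrative kernel-style caller (not the actual drm_plane.c code) looks
roughly like this:

```c
#include <linux/err.h>

/* Sketch of a caller honouring the ERR_PTR contract: a bare NULL return
 * from create_in_format_blob() would pass the IS_ERR() check and be
 * dereferenced below, which is the oops the fix prevents.
 */
static int attach_in_formats_sketch(struct drm_plane *plane)
{
	struct drm_property_blob *blob = create_in_format_blob(plane->dev, plane);

	if (IS_ERR(blob))
		return PTR_ERR(blob);	/* -EINVAL, -ENOMEM, ... propagate */

	return blob->base.id;		/* only valid blobs reach this point */
}
```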
diff --git a/queue-6.17/drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch b/queue-6.17/drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch
new file mode 100644 (file)
index 0000000..56c5924
--- /dev/null
@@ -0,0 +1,46 @@
+From 9eb00b5f5697bd56baa3222c7a1426fa15bacfb5 Mon Sep 17 00:00:00 2001
+From: Robert McClinton <rbmccav@gmail.com>
+Date: Sun, 16 Nov 2025 12:33:21 -0500
+Subject: drm/radeon: delete radeon_fence_process in is_signaled, no deadlock
+
+From: Robert McClinton <rbmccav@gmail.com>
+
+commit 9eb00b5f5697bd56baa3222c7a1426fa15bacfb5 upstream.
+
+Delete the attempt to progress the queue when checking if the fence is
+signaled. This avoids a deadlock.
+
+dma_fence_ops::signaled can be called with the fence lock in an unknown
+state. For radeon, the fence lock is also the wait queue lock. This can
+cause a self deadlock when signaled() tries to make forward progress on
+the wait queue. But advancing the queue is unneeded because incorrectly
+returning false from signaled() is perfectly acceptable.
+
+Link: https://github.com/brave/brave-browser/issues/49182
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4641
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Robert McClinton <rbmccav@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 527ba26e50ec2ca2be9c7c82f3ad42998a75d0db)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/radeon_fence.c |    7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(str
+       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
+               return true;
+-      if (down_read_trylock(&rdev->exclusive_lock)) {
+-              radeon_fence_process(rdev, ring);
+-              up_read(&rdev->exclusive_lock);
+-
+-              if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
+-                      return true;
+-      }
+       return false;
+ }
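For background, the dma_fence contract allows .signaled() to return a
conservative false, which is why the queue processing can simply be dropped.
A hedged, generic sketch of the side-effect-free pattern that remains (the
types and names below are illustrative, not radeon's):

```c
#include <linux/dma-fence.h>

/* Illustrative types standing in for the driver's fence bookkeeping */
struct sketch_fence_driver { atomic64_t last_seq; };
struct sketch_fence {
	struct dma_fence base;
	struct sketch_fence_driver *drv;
	u64 seq;
};

/* .signaled() may run with the fence lock held (for radeon that lock is
 * also the wait-queue lock), so it must not take locks or advance the
 * queue: reading the already-published completion state is enough, and a
 * stale "false" is acceptable per the dma_fence API.
 */
static bool sketch_fence_is_signaled(struct dma_fence *f)
{
	struct sketch_fence *fence = container_of(f, struct sketch_fence, base);

	return atomic64_read(&fence->drv->last_seq) >= fence->seq;
}
```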
diff --git a/queue-6.17/drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch b/queue-6.17/drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch
new file mode 100644 (file)
index 0000000..5d7563a
--- /dev/null
@@ -0,0 +1,37 @@
+From 4c5376b4b143c4834ebd392aef2215847752b16a Mon Sep 17 00:00:00 2001
+From: Ma Ke <make24@iscas.ac.cn>
+Date: Wed, 22 Oct 2025 19:47:20 +0800
+Subject: drm/tegra: dc: Fix reference leak in tegra_dc_couple()
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+commit 4c5376b4b143c4834ebd392aef2215847752b16a upstream.
+
+driver_find_device() calls get_device() to increment the reference
+count once a matching device is found, but there is no put_device() to
+balance the reference count. To avoid reference count leakage, add
+put_device() to decrease the reference count.
+
+Found by code review.
+
+Cc: stable@vger.kernel.org
+Fixes: a31500fe7055 ("drm/tegra: dc: Restore coupling of display controllers")
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Acked-by: Mikko Perttunen <mperttunen@nvidia.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251022114720.24937-1-make24@iscas.ac.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/tegra/dc.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -3148,6 +3148,7 @@ static int tegra_dc_couple(struct tegra_
+               dc->client.parent = &parent->client;
+               dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
++              put_device(companion);
+       }
+       return 0;
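The underlying rule, as a hedged sketch (the match callback name below is
hypothetical, and this is not the dc.c code verbatim): driver_find_device()
returns its match with an extra reference taken via get_device(), so every
successful lookup must be balanced with put_device() once the coupled device
has been used:

```c
	companion = driver_find_device(dc->dev->driver, NULL, dc,
				       example_match_companion);
	if (companion) {
		/* ... couple the two display controllers ... */
		dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
		put_device(companion);	/* balance the lookup's get_device() */
	}
```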
diff --git a/queue-6.17/mptcp-avoid-unneeded-subflow-level-drops.patch b/queue-6.17/mptcp-avoid-unneeded-subflow-level-drops.patch
new file mode 100644 (file)
index 0000000..56d3aaa
--- /dev/null
@@ -0,0 +1,116 @@
+From 4f102d747cadd8f595f2b25882eed9bec1675fb1 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 18 Nov 2025 08:20:20 +0100
+Subject: mptcp: avoid unneeded subflow-level drops
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 4f102d747cadd8f595f2b25882eed9bec1675fb1 upstream.
+
+The rcv window is shared among all the subflows. Currently, MPTCP syncs
+the TCP-level rcv window with the MPTCP one at tcp_transmit_skb() time.
+
+The above means that incoming data may sporadically observe an outdated
+TCP-level rcv window and be wrongly dropped by TCP.
+
+Address the issue by checking for the edge condition before queuing the
+data at the TCP level, and syncing the rcv window if needed.
+
+Note that the issue has actually been present since the very first MPTCP
+implementation, but backports older than the blamed commit below will
+range from impossible to useless.
+
+Before:
+
+  $ nstat -n; sleep 1; nstat -z TcpExtBeyondWindow
+  TcpExtBeyondWindow              14                 0.0
+
+After:
+
+  $ nstat -n; sleep 1; nstat -z TcpExtBeyondWindow
+  TcpExtBeyondWindow              0                  0.0
+
+Fixes: fa3fe2b15031 ("mptcp: track window announced to peer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-2-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c  |   31 +++++++++++++++++++++++++++++++
+ net/mptcp/protocol.h |    1 +
+ 2 files changed, 32 insertions(+)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -1044,6 +1044,31 @@ static void __mptcp_snd_una_update(struc
+       WRITE_ONCE(msk->snd_una, new_snd_una);
+ }
++static void rwin_update(struct mptcp_sock *msk, struct sock *ssk,
++                      struct sk_buff *skb)
++{
++      struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++      struct tcp_sock *tp = tcp_sk(ssk);
++      u64 mptcp_rcv_wnd;
++
++      /* Avoid touching extra cachelines if TCP is going to accept this
++       * skb without filling the TCP-level window even with a possibly
++       * outdated mptcp-level rwin.
++       */
++      if (!skb->len || skb->len < tcp_receive_window(tp))
++              return;
++
++      mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent);
++      if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent))
++              return;
++
++      /* Some other subflow grew the mptcp-level rwin since rcv_wup,
++       * resync.
++       */
++      tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent;
++      subflow->rcv_wnd_sent = mptcp_rcv_wnd;
++}
++
+ static void ack_update_msk(struct mptcp_sock *msk,
+                          struct sock *ssk,
+                          struct mptcp_options_received *mp_opt)
+@@ -1211,6 +1236,7 @@ bool mptcp_incoming_options(struct sock
+        */
+       if (mp_opt.use_ack)
+               ack_update_msk(msk, sk, &mp_opt);
++      rwin_update(msk, sk, skb);
+       /* Zero-data-length packets are dropped by the caller and not
+        * propagated to the MPTCP layer, so the skb extension does not
+@@ -1297,6 +1323,10 @@ static void mptcp_set_rwin(struct tcp_so
+       if (rcv_wnd_new != rcv_wnd_old) {
+ raise_win:
++              /* The msk-level rcv wnd is after the tcp level one,
++               * sync the latter.
++               */
++              rcv_wnd_new = rcv_wnd_old;
+               win = rcv_wnd_old - ack_seq;
+               tp->rcv_wnd = min_t(u64, win, U32_MAX);
+               new_win = tp->rcv_wnd;
+@@ -1320,6 +1350,7 @@ raise_win:
+ update_wspace:
+       WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
++      subflow->rcv_wnd_sent = rcv_wnd_new;
+ }
+ static void mptcp_track_rwin(struct tcp_sock *tp)
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -509,6 +509,7 @@ struct mptcp_subflow_context {
+       u64     remote_key;
+       u64     idsn;
+       u64     map_seq;
++      u64     rcv_wnd_sent;
+       u32     snd_isn;
+       u32     token;
+       u32     rel_write_seq;
diff --git a/queue-6.17/mptcp-decouple-mptcp-fastclose-from-tcp-close.patch b/queue-6.17/mptcp-decouple-mptcp-fastclose-from-tcp-close.patch
new file mode 100644 (file)
index 0000000..9f663a8
--- /dev/null
@@ -0,0 +1,96 @@
+From fff0c87996672816a84c3386797a5e69751c5888 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 18 Nov 2025 08:20:23 +0100
+Subject: mptcp: decouple mptcp fastclose from tcp close
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit fff0c87996672816a84c3386797a5e69751c5888 upstream.
+
+With the current fastclose implementation, the mptcp_do_fastclose()
+helper is in charge of two distinct actions: send the fastclose reset
+and cleanup the subflows.
+
+Formally decouple the two steps, ensuring that mptcp explicitly closes
+all the subflows after the mentioned helper.
+
+This will make the upcoming fix simpler and allow dropping the 2nd
+argument from mptcp_destroy_common(). The Fixes tag is then the same as
+in the next commit to help with the backports.
+
+Fixes: d21f83485518 ("mptcp: use fastclose on more edge scenarios")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-5-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |   13 +++++++++----
+ net/mptcp/protocol.h |    2 +-
+ 2 files changed, 10 insertions(+), 5 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2785,7 +2785,11 @@ static void mptcp_worker(struct work_str
+               __mptcp_close_subflow(sk);
+       if (mptcp_close_tout_expired(sk)) {
++              struct mptcp_subflow_context *subflow, *tmp;
++
+               mptcp_do_fastclose(sk);
++              mptcp_for_each_subflow_safe(msk, subflow, tmp)
++                      __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
+               mptcp_close_wake_up(sk);
+       }
+@@ -3210,7 +3214,8 @@ static int mptcp_disconnect(struct sock
+       /* msk->subflow is still intact, the following will not free the first
+        * subflow
+        */
+-      mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
++      mptcp_do_fastclose(sk);
++      mptcp_destroy_common(msk);
+       /* The first subflow is already in TCP_CLOSE status, the following
+        * can't overlap with a fallback anymore
+@@ -3389,7 +3394,7 @@ void mptcp_rcv_space_init(struct mptcp_s
+               msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
+ }
+-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
++void mptcp_destroy_common(struct mptcp_sock *msk)
+ {
+       struct mptcp_subflow_context *subflow, *tmp;
+       struct sock *sk = (struct sock *)msk;
+@@ -3398,7 +3403,7 @@ void mptcp_destroy_common(struct mptcp_s
+       /* join list will be eventually flushed (with rst) at sock lock release time */
+       mptcp_for_each_subflow_safe(msk, subflow, tmp)
+-              __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
++              __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
+       __skb_queue_purge(&sk->sk_receive_queue);
+       skb_rbtree_purge(&msk->out_of_order_queue);
+@@ -3416,7 +3421,7 @@ static void mptcp_destroy(struct sock *s
+       /* allow the following to close even the initial subflow */
+       msk->free_first = 1;
+-      mptcp_destroy_common(msk, 0);
++      mptcp_destroy_common(msk);
+       sk_sockets_allocated_dec(sk);
+ }
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -979,7 +979,7 @@ static inline void mptcp_propagate_sndbu
+       local_bh_enable();
+ }
+-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
++void mptcp_destroy_common(struct mptcp_sock *msk);
+ #define MPTCP_TOKEN_MAX_RETRIES       4
diff --git a/queue-6.17/mptcp-do-not-fallback-when-ooo-is-present.patch b/queue-6.17/mptcp-do-not-fallback-when-ooo-is-present.patch
new file mode 100644 (file)
index 0000000..0d22390
--- /dev/null
@@ -0,0 +1,43 @@
+From 1bba3f219c5e8c29e63afa3c1fc24f875ebec119 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 18 Nov 2025 08:20:22 +0100
+Subject: mptcp: do not fallback when OoO is present
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 1bba3f219c5e8c29e63afa3c1fc24f875ebec119 upstream.
+
+In case of DSS corruption, the MPTCP protocol tries to avoid the subflow
+reset if fallback is possible. Such corruptions happen in the receive
+path; to ensure fallback is possible the stack additionally needs to
+check for OoO data, otherwise the fallback will break the data stream.
+
+Fixes: e32d262c89e2 ("mptcp: handle consistently DSS corruption")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/598
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-4-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -77,6 +77,13 @@ bool __mptcp_try_fallback(struct mptcp_s
+       if (__mptcp_check_fallback(msk))
+               return true;
++      /* The caller possibly is not holding the msk socket lock, but
++       * in the fallback case only the current subflow is touching
++       * the OoO queue.
++       */
++      if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
++              return false;
++
+       spin_lock_bh(&msk->fallback_lock);
+       if (!msk->allow_infinite_fallback) {
+               spin_unlock_bh(&msk->fallback_lock);
diff --git a/queue-6.17/mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch b/queue-6.17/mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch
new file mode 100644 (file)
index 0000000..920c800
--- /dev/null
@@ -0,0 +1,194 @@
+From 426358d9be7ce3518966422f87b96f1bad27295f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 17 Nov 2025 10:07:44 +0000
+Subject: mptcp: fix a race in mptcp_pm_del_add_timer()
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 426358d9be7ce3518966422f87b96f1bad27295f upstream.
+
+mptcp_pm_del_add_timer() can call sk_stop_timer_sync(sk, &entry->add_timer)
+while another thread might have already freed the entry, as reported by syzbot.
+
+Add RCU protection to fix this issue.
+
+Also replace the confusing add_timer variable with a stop_timer boolean.
+
+syzbot report:
+
+BUG: KASAN: slab-use-after-free in __timer_delete_sync+0x372/0x3f0 kernel/time/timer.c:1616
+Read of size 4 at addr ffff8880311e4150 by task kworker/1:1/44
+
+CPU: 1 UID: 0 PID: 44 Comm: kworker/1:1 Not tainted syzkaller #0 PREEMPT_{RT,(full)}
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/02/2025
+Workqueue: events mptcp_worker
+Call Trace:
+ <TASK>
+  dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
+  print_address_description mm/kasan/report.c:378 [inline]
+  print_report+0xca/0x240 mm/kasan/report.c:482
+  kasan_report+0x118/0x150 mm/kasan/report.c:595
+  __timer_delete_sync+0x372/0x3f0 kernel/time/timer.c:1616
+  sk_stop_timer_sync+0x1b/0x90 net/core/sock.c:3631
+  mptcp_pm_del_add_timer+0x283/0x310 net/mptcp/pm.c:362
+  mptcp_incoming_options+0x1357/0x1f60 net/mptcp/options.c:1174
+  tcp_data_queue+0xca/0x6450 net/ipv4/tcp_input.c:5361
+  tcp_rcv_established+0x1335/0x2670 net/ipv4/tcp_input.c:6441
+  tcp_v4_do_rcv+0x98b/0xbf0 net/ipv4/tcp_ipv4.c:1931
+  tcp_v4_rcv+0x252a/0x2dc0 net/ipv4/tcp_ipv4.c:2374
+  ip_protocol_deliver_rcu+0x221/0x440 net/ipv4/ip_input.c:205
+  ip_local_deliver_finish+0x3bb/0x6f0 net/ipv4/ip_input.c:239
+  NF_HOOK+0x30c/0x3a0 include/linux/netfilter.h:318
+  NF_HOOK+0x30c/0x3a0 include/linux/netfilter.h:318
+  __netif_receive_skb_one_core net/core/dev.c:6079 [inline]
+  __netif_receive_skb+0x143/0x380 net/core/dev.c:6192
+  process_backlog+0x31e/0x900 net/core/dev.c:6544
+  __napi_poll+0xb6/0x540 net/core/dev.c:7594
+  napi_poll net/core/dev.c:7657 [inline]
+  net_rx_action+0x5f7/0xda0 net/core/dev.c:7784
+  handle_softirqs+0x22f/0x710 kernel/softirq.c:622
+  __do_softirq kernel/softirq.c:656 [inline]
+  __local_bh_enable_ip+0x1a0/0x2e0 kernel/softirq.c:302
+  mptcp_pm_send_ack net/mptcp/pm.c:210 [inline]
+ mptcp_pm_addr_send_ack+0x41f/0x500 net/mptcp/pm.c:-1
+  mptcp_pm_worker+0x174/0x320 net/mptcp/pm.c:1002
+  mptcp_worker+0xd5/0x1170 net/mptcp/protocol.c:2762
+  process_one_work kernel/workqueue.c:3263 [inline]
+  process_scheduled_works+0xae1/0x17b0 kernel/workqueue.c:3346
+  worker_thread+0x8a0/0xda0 kernel/workqueue.c:3427
+  kthread+0x711/0x8a0 kernel/kthread.c:463
+  ret_from_fork+0x4bc/0x870 arch/x86/kernel/process.c:158
+  ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245
+ </TASK>
+
+Allocated by task 44:
+  kasan_save_stack mm/kasan/common.c:56 [inline]
+  kasan_save_track+0x3e/0x80 mm/kasan/common.c:77
+  poison_kmalloc_redzone mm/kasan/common.c:400 [inline]
+  __kasan_kmalloc+0x93/0xb0 mm/kasan/common.c:417
+  kasan_kmalloc include/linux/kasan.h:262 [inline]
+  __kmalloc_cache_noprof+0x1ef/0x6c0 mm/slub.c:5748
+  kmalloc_noprof include/linux/slab.h:957 [inline]
+  mptcp_pm_alloc_anno_list+0x104/0x460 net/mptcp/pm.c:385
+  mptcp_pm_create_subflow_or_signal_addr+0xf9d/0x1360 net/mptcp/pm_kernel.c:355
+  mptcp_pm_nl_fully_established net/mptcp/pm_kernel.c:409 [inline]
+  __mptcp_pm_kernel_worker+0x417/0x1ef0 net/mptcp/pm_kernel.c:1529
+  mptcp_pm_worker+0x1ee/0x320 net/mptcp/pm.c:1008
+  mptcp_worker+0xd5/0x1170 net/mptcp/protocol.c:2762
+  process_one_work kernel/workqueue.c:3263 [inline]
+  process_scheduled_works+0xae1/0x17b0 kernel/workqueue.c:3346
+  worker_thread+0x8a0/0xda0 kernel/workqueue.c:3427
+  kthread+0x711/0x8a0 kernel/kthread.c:463
+  ret_from_fork+0x4bc/0x870 arch/x86/kernel/process.c:158
+  ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245
+
+Freed by task 6630:
+  kasan_save_stack mm/kasan/common.c:56 [inline]
+  kasan_save_track+0x3e/0x80 mm/kasan/common.c:77
+  __kasan_save_free_info+0x46/0x50 mm/kasan/generic.c:587
+  kasan_save_free_info mm/kasan/kasan.h:406 [inline]
+  poison_slab_object mm/kasan/common.c:252 [inline]
+  __kasan_slab_free+0x5c/0x80 mm/kasan/common.c:284
+  kasan_slab_free include/linux/kasan.h:234 [inline]
+  slab_free_hook mm/slub.c:2523 [inline]
+  slab_free mm/slub.c:6611 [inline]
+  kfree+0x197/0x950 mm/slub.c:6818
+  mptcp_remove_anno_list_by_saddr+0x2d/0x40 net/mptcp/pm.c:158
+  mptcp_pm_flush_addrs_and_subflows net/mptcp/pm_kernel.c:1209 [inline]
+  mptcp_nl_flush_addrs_list net/mptcp/pm_kernel.c:1240 [inline]
+  mptcp_pm_nl_flush_addrs_doit+0x593/0xbb0 net/mptcp/pm_kernel.c:1281
+  genl_family_rcv_msg_doit+0x215/0x300 net/netlink/genetlink.c:1115
+  genl_family_rcv_msg net/netlink/genetlink.c:1195 [inline]
+  genl_rcv_msg+0x60e/0x790 net/netlink/genetlink.c:1210
+  netlink_rcv_skb+0x208/0x470 net/netlink/af_netlink.c:2552
+  genl_rcv+0x28/0x40 net/netlink/genetlink.c:1219
+  netlink_unicast_kernel net/netlink/af_netlink.c:1320 [inline]
+  netlink_unicast+0x846/0xa10 net/netlink/af_netlink.c:1346
+  netlink_sendmsg+0x805/0xb30 net/netlink/af_netlink.c:1896
+  sock_sendmsg_nosec net/socket.c:727 [inline]
+  __sock_sendmsg+0x21c/0x270 net/socket.c:742
+  ____sys_sendmsg+0x508/0x820 net/socket.c:2630
+  ___sys_sendmsg+0x21f/0x2a0 net/socket.c:2684
+  __sys_sendmsg net/socket.c:2716 [inline]
+  __do_sys_sendmsg net/socket.c:2721 [inline]
+  __se_sys_sendmsg net/socket.c:2719 [inline]
+  __x64_sys_sendmsg+0x1a1/0x260 net/socket.c:2719
+  do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+  do_syscall_64+0xfa/0xfa0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Cc: stable@vger.kernel.org
+Fixes: 00cfd77b9063 ("mptcp: retransmit ADD_ADDR when timeout")
+Reported-by: syzbot+2a6fbf0f0530375968df@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/691ad3c3.a70a0220.f6df1.0004.GAE@google.com
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251117100745.1913963-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c |   20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -18,6 +18,7 @@ struct mptcp_pm_add_entry {
+       u8                      retrans_times;
+       struct timer_list       add_timer;
+       struct mptcp_sock       *sock;
++      struct rcu_head         rcu;
+ };
+ static DEFINE_SPINLOCK(mptcp_pm_list_lock);
+@@ -155,7 +156,7 @@ bool mptcp_remove_anno_list_by_saddr(str
+       entry = mptcp_pm_del_add_timer(msk, addr, false);
+       ret = entry;
+-      kfree(entry);
++      kfree_rcu(entry, rcu);
+       return ret;
+ }
+@@ -324,22 +325,27 @@ mptcp_pm_del_add_timer(struct mptcp_sock
+ {
+       struct mptcp_pm_add_entry *entry;
+       struct sock *sk = (struct sock *)msk;
+-      struct timer_list *add_timer = NULL;
++      bool stop_timer = false;
++
++      rcu_read_lock();
+       spin_lock_bh(&msk->pm.lock);
+       entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
+       if (entry && (!check_id || entry->addr.id == addr->id)) {
+               entry->retrans_times = ADD_ADDR_RETRANS_MAX;
+-              add_timer = &entry->add_timer;
++              stop_timer = true;
+       }
+       if (!check_id && entry)
+               list_del(&entry->list);
+       spin_unlock_bh(&msk->pm.lock);
+-      /* no lock, because sk_stop_timer_sync() is calling timer_delete_sync() */
+-      if (add_timer)
+-              sk_stop_timer_sync(sk, add_timer);
++      /* Note: entry might have been removed by another thread.
++       * We hold rcu_read_lock() to ensure it is not freed under us.
++       */
++      if (stop_timer)
++              sk_stop_timer_sync(sk, &entry->add_timer);
++      rcu_read_unlock();
+       return entry;
+ }
+@@ -395,7 +401,7 @@ static void mptcp_pm_free_anno_list(stru
+       list_for_each_entry_safe(entry, tmp, &free_list, list) {
+               sk_stop_timer_sync(sk, &entry->add_timer);
+-              kfree(entry);
++              kfree_rcu(entry, rcu);
+       }
+ }
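The shape of the fix is the standard RCU deferral pattern: the removal paths
switch to kfree_rcu() (using the rcu_head added to the entry), and the
timer-stopping path pins the entry inside an RCU read-side section so a
concurrent flush cannot free it underneath. A hedged, simplified sketch of
the two sides (not the mptcp code verbatim):

```c
/* remover side: unlink under the pm lock, defer the actual free */
	spin_lock_bh(&msk->pm.lock);
	list_del(&entry->list);
	spin_unlock_bh(&msk->pm.lock);
	kfree_rcu(entry, rcu);		/* freed only after readers are done */

/* del-timer side: the entry stays valid for the whole read-side section */
	rcu_read_lock();
	spin_lock_bh(&msk->pm.lock);
	entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
	stop_timer = entry && (!check_id || entry->addr.id == addr->id);
	spin_unlock_bh(&msk->pm.lock);

	if (stop_timer)
		sk_stop_timer_sync(sk, &entry->add_timer);
	rcu_read_unlock();
```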
diff --git a/queue-6.17/mptcp-fix-ack-generation-for-fallback-msk.patch b/queue-6.17/mptcp-fix-ack-generation-for-fallback-msk.patch
new file mode 100644 (file)
index 0000000..788d261
--- /dev/null
@@ -0,0 +1,85 @@
+From 5e15395f6d9ec07395866c5511f4b4ac566c0c9b Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 18 Nov 2025 08:20:19 +0100
+Subject: mptcp: fix ack generation for fallback msk
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 5e15395f6d9ec07395866c5511f4b4ac566c0c9b upstream.
+
+mptcp_cleanup_rbuf() needs to know the most recent mptcp-level
+rcv_wnd sent, and such information is tracked in the msk->old_wspace
+field, updated at ack transmission time by mptcp_write_options().
+
+Fallback sockets do not add any mptcp options, so such helper is never
+invoked, and the msk->old_wspace value remains stale. That in turn makes
+ack generation at recvmsg() time quite random.
+
+Address the issue by ensuring mptcp_write_options() is invoked even for
+fallback sockets, and just update the needed info in such a case.
+
+The issue went unnoticed for a long time, as mptcp currently overshoots
+the fallback socket receive buffer autotune significantly. That is going
+to change in the near future.
+
+Fixes: e3859603ba13 ("mptcp: better msk receive window updates")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/594
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-1-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c |   23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -838,8 +838,11 @@ bool mptcp_established_options(struct so
+       opts->suboptions = 0;
++      /* Force later mptcp_write_options(), but do not use any actual
++       * option space.
++       */
+       if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
+-              return false;
++              return true;
+       if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
+               if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
+@@ -1319,6 +1322,20 @@ update_wspace:
+       WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
+ }
++static void mptcp_track_rwin(struct tcp_sock *tp)
++{
++      const struct sock *ssk = (const struct sock *)tp;
++      struct mptcp_subflow_context *subflow;
++      struct mptcp_sock *msk;
++
++      if (!ssk)
++              return;
++
++      subflow = mptcp_subflow_ctx(ssk);
++      msk = mptcp_sk(subflow->conn);
++      WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
++}
++
+ __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+ {
+       struct csum_pseudo_header header;
+@@ -1611,6 +1628,10 @@ mp_rst:
+                                     opts->reset_transient,
+                                     opts->reset_reason);
+               return;
++      } else if (unlikely(!opts->suboptions)) {
++              /* Fallback to TCP */
++              mptcp_track_rwin(tp);
++              return;
+       }
+       if (OPTION_MPTCP_PRIO & opts->suboptions) {
diff --git a/queue-6.17/mptcp-fix-duplicate-reset-on-fastclose.patch b/queue-6.17/mptcp-fix-duplicate-reset-on-fastclose.patch
new file mode 100644 (file)
index 0000000..02d3618
--- /dev/null
@@ -0,0 +1,109 @@
+From ae155060247be8dcae3802a95bd1bdf93ab3215d Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 18 Nov 2025 08:20:24 +0100
+Subject: mptcp: fix duplicate reset on fastclose
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit ae155060247be8dcae3802a95bd1bdf93ab3215d upstream.
+
+The CI reports sporadic failures of the fastclose self-tests. The root
+cause is a duplicate reset, not carrying the relevant MPTCP option.
+In the failing scenario the bad reset is received by the peer before
+the fastclose one, preventing the reception of the latter.
+
+Indeed there is a window of opportunity at fastclose time for the
+following race:
+
+  mptcp_do_fastclose
+    __mptcp_close_ssk
+      __tcp_close()
+        tcp_set_state() [1]
+        tcp_send_active_reset() [2]
+
+After [1] the stack will send a reset in response to in-flight data reaching
+the now closed port. Such a reset may race with [2].
+
+Address the issue by explicitly sending a single reset on fastclose before
+moving the subflow to the close status.
+
+Fixes: d21f83485518 ("mptcp: use fastclose on more edge scenarios")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/596
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-6-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |   36 +++++++++++++++++++++++-------------
+ 1 file changed, 23 insertions(+), 13 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2377,7 +2377,6 @@ bool __mptcp_retransmit_pending_data(str
+ /* flags for __mptcp_close_ssk() */
+ #define MPTCP_CF_PUSH         BIT(1)
+-#define MPTCP_CF_FASTCLOSE    BIT(2)
+ /* be sure to send a reset only if the caller asked for it, also
+  * clean completely the subflow status when the subflow reaches
+@@ -2388,7 +2387,7 @@ static void __mptcp_subflow_disconnect(s
+                                      unsigned int flags)
+ {
+       if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+-          (flags & MPTCP_CF_FASTCLOSE)) {
++          subflow->send_fastclose) {
+               /* The MPTCP code never wait on the subflow sockets, TCP-level
+                * disconnect should never fail
+                */
+@@ -2435,14 +2434,8 @@ static void __mptcp_close_ssk(struct soc
+       lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+-      if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
+-              /* be sure to force the tcp_close path
+-               * to generate the egress reset
+-               */
+-              ssk->sk_lingertime = 0;
+-              sock_set_flag(ssk, SOCK_LINGER);
+-              subflow->send_fastclose = 1;
+-      }
++      if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
++              tcp_set_state(ssk, TCP_CLOSE);
+       need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+       if (!dispose_it) {
+@@ -2745,9 +2738,26 @@ static void mptcp_do_fastclose(struct so
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       mptcp_set_state(sk, TCP_CLOSE);
+-      mptcp_for_each_subflow_safe(msk, subflow, tmp)
+-              __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
+-                                subflow, MPTCP_CF_FASTCLOSE);
++
++      /* Explicitly send the fastclose reset as need */
++      if (__mptcp_check_fallback(msk))
++              return;
++
++      mptcp_for_each_subflow_safe(msk, subflow, tmp) {
++              struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++              lock_sock(ssk);
++
++              /* Some subflow socket states don't allow/need a reset.*/
++              if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
++                      goto unlock;
++
++              subflow->send_fastclose = 1;
++              tcp_send_active_reset(ssk, ssk->sk_allocation,
++                                    SK_RST_REASON_TCP_ABORT_ON_CLOSE);
++unlock:
++              release_sock(ssk);
++      }
+ }
+ static void mptcp_worker(struct work_struct *work)
diff --git a/queue-6.17/mptcp-fix-premature-close-in-case-of-fallback.patch b/queue-6.17/mptcp-fix-premature-close-in-case-of-fallback.patch
new file mode 100644 (file)
index 0000000..8554141
--- /dev/null
@@ -0,0 +1,60 @@
+From 17393fa7b7086664be519e7230cb6ed7ec7d9462 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 18 Nov 2025 08:20:21 +0100
+Subject: mptcp: fix premature close in case of fallback
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 17393fa7b7086664be519e7230cb6ed7ec7d9462 upstream.
+
+I'm observing very frequent self-test failures in case of fallback when
+running on a CONFIG_PREEMPT kernel.
+
+The root cause is that subflow_sched_work_if_closed() closes any subflow
+as soon as it is half-closed and has no incoming data pending.
+
+That works well for regular subflows - MPTCP needs bi-directional
+connectivity to operate on a given subflow - but for fallback sockets it
+is race prone.
+
+When the TCP peer closes the connection before the MPTCP one,
+subflow_sched_work_if_closed() will schedule the MPTCP worker to
+gracefully close the subflow, and shortly after will schedule it again
+to inject and process a dummy incoming DATA_FIN.
+
+On a CONFIG_PREEMPT kernel, the MPTCP worker can kick in and close the
+fallback subflow before subflow_sched_work_if_closed() is able to create
+the dummy DATA_FIN, unexpectedly interrupting the transfer.
+
+Address the issue by explicitly avoiding closing fallback subflows when
+the peer is only half-closed.
+
+Note that, when the subflow is able to create the DATA_FIN before the
+worker invocation, the worker will change the msk state before trying to
+close the subflow and will skip the latter operation, as the msk will no
+longer match the precondition in __mptcp_close_subflow().
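+
+Concretely, the reap check in __mptcp_close_subflow() now reads as follows
+(a sketch of the hunk below, with an explanatory comment added):
+
+  /* a fallback msk runs the whole transfer over this single subflow, so
+   * a half-closed (TCP_CLOSE_WAIT) fallback subflow must not be reaped
+   * while the msk is still established
+   */
+  if (ssk_state != TCP_CLOSE &&
+      (ssk_state != TCP_CLOSE_WAIT ||
+       inet_sk_state_load(sk) != TCP_ESTABLISHED ||
+       __mptcp_check_fallback(msk)))
+          continue;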
+
+Fixes: f09b0ad55a11 ("mptcp: close subflow when receiving TCP+FIN")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-3-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2531,7 +2531,8 @@ static void __mptcp_close_subflow(struct
+               if (ssk_state != TCP_CLOSE &&
+                   (ssk_state != TCP_CLOSE_WAIT ||
+-                   inet_sk_state_load(sk) != TCP_ESTABLISHED))
++                   inet_sk_state_load(sk) != TCP_ESTABLISHED ||
++                   __mptcp_check_fallback(msk)))
+                       continue;
+               /* 'subflow_data_ready' will re-sched once rx queue is empty */
diff --git a/queue-6.17/selftests-mptcp-join-endpoints-longer-timeout.patch b/queue-6.17/selftests-mptcp-join-endpoints-longer-timeout.patch
new file mode 100644 (file)
index 0000000..d7d4a34
--- /dev/null
@@ -0,0 +1,75 @@
+From fb13c6bb810ca871964e062cf91882d1c83db509 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Tue, 18 Nov 2025 08:20:26 +0100
+Subject: selftests: mptcp: join: endpoints: longer timeout
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit fb13c6bb810ca871964e062cf91882d1c83db509 upstream.
+
+In rare cases, when the test environment is very slow, some endpoints
+tests can fail because some expected events have not been seen.
+
+Because the tests expect a long on-going connection and do not wait for
+the end of the transfer, it is fine to use a longer timeout, even one
+above the default. The connection is killed at the end, after the
+verifications: increasing the timeout doesn't change anything, apart from
+preventing the connection from ending before the verifications are done.
+
+To play it safe, all endpoints tests not waiting for the end of the
+transfer now have a longer timeout: 2 minutes.
+
+The commit in the Fixes tag made the connection longer, but the default
+timeout would still have stopped it after 1 minute, which might not be
+enough in very slow environments.
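+
+The resulting pattern in the affected tests looks roughly like this
+(run_tests, wait_mpj and the variables come from the hunks below;
+mptcp_lib_kill_wait and chk_join_nr are assumed to be the usual
+mptcp_join.sh helpers, and the specific checks are illustrative only):
+
+  # start a long transfer in the background, without waiting for it
+  { timeout_test=120 test_linkfail=128 speed=slow \
+          run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+  local tests_pid=$!
+
+  # run the verifications while the transfer is still on-going
+  wait_mpj $ns2
+  chk_join_nr 1 1 1
+
+  # only then kill the connection: the 2-minute timeout merely has to
+  # outlive the verifications, not the whole transfer
+  mptcp_lib_kill_wait $tests_pid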
+
+Fixes: 6457595db987 ("selftests: mptcp: join: endpoints: longer transfer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-8-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3757,7 +3757,7 @@ endpoint_tests()
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_set_limits $ns2 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+-              { test_linkfail=128 speed=slow \
++              { timeout_test=120 test_linkfail=128 speed=slow \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+@@ -3784,7 +3784,7 @@ endpoint_tests()
+               pm_nl_set_limits $ns2 0 3
+               pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+@@ -3862,7 +3862,7 @@ endpoint_tests()
+               # broadcast IP: no packet for this address will be received on ns1
+               pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
+               pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+@@ -3935,7 +3935,7 @@ endpoint_tests()
+               # broadcast IP: no packet for this address will be received on ns1
+               pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
+               pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
+-              { test_linkfail=128 speed=20 \
++              { timeout_test=120 test_linkfail=128 speed=20 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
diff --git a/queue-6.17/selftests-mptcp-join-userspace-longer-timeout.patch b/queue-6.17/selftests-mptcp-join-userspace-longer-timeout.patch
new file mode 100644 (file)
index 0000000..5335d69
--- /dev/null
@@ -0,0 +1,84 @@
+From 0e4ec14dc1ee4b1ec347729c225c3ca950f2bcf6 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Tue, 18 Nov 2025 08:20:27 +0100
+Subject: selftests: mptcp: join: userspace: longer timeout
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 0e4ec14dc1ee4b1ec347729c225c3ca950f2bcf6 upstream.
+
+In rare cases, when the test environment is very slow, some userspace
+tests can fail because some expected events have not been seen.
+
+Because the tests expect a long on-going connection and do not wait for
+the end of the transfer, it is fine to use a longer timeout, even one
+above the default. The connection is killed at the end, after the
+verifications: increasing the timeout doesn't change anything, apart from
+preventing the connection from ending before the verifications are done.
+
+To play it safe, all userspace tests not waiting for the end of the
+transfer now have a longer timeout: 2 minutes.
+
+The commit in the Fixes tag made the connection longer, but the default
+timeout would still have stopped it after 1 minute, which might not be
+enough in very slow environments.
+
+Fixes: 290493078b96 ("selftests: mptcp: join: userspace: longer transfer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-9-806d3781c95f@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3620,7 +3620,7 @@ userspace_tests()
+          continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+               set_userspace_pm $ns1
+               pm_nl_set_limits $ns2 2 2
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+               wait_mpj $ns1
+@@ -3653,7 +3653,7 @@ userspace_tests()
+          continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+               set_userspace_pm $ns2
+               pm_nl_set_limits $ns1 0 1
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+               wait_mpj $ns2
+@@ -3681,7 +3681,7 @@ userspace_tests()
+          continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+               set_userspace_pm $ns2
+               pm_nl_set_limits $ns1 0 1
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+               wait_mpj $ns2
+@@ -3702,7 +3702,7 @@ userspace_tests()
+          continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+               set_userspace_pm $ns2
+               pm_nl_set_limits $ns1 0 1
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+               wait_mpj $ns2
+@@ -3726,7 +3726,7 @@ userspace_tests()
+          continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+               set_userspace_pm $ns1
+               pm_nl_set_limits $ns2 1 1
+-              { test_linkfail=128 speed=5 \
++              { timeout_test=120 test_linkfail=128 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null
+               local tests_pid=$!
+               wait_mpj $ns1
index eea22f6b2c74c6be85a01e1fb0d60a377d8bde15..2c5f9ea76b31084432660a70c8cfb0fd9cdb15db 100644 (file)
@@ -56,3 +56,21 @@ platform-x86-alienware-wmi-wmax-add-support-for-the-whole-x-family.patch
 platform-x86-alienware-wmi-wmax-add-support-for-the-whole-g-family.patch
 platform-x86-alienware-wmi-wmax-add-awcc-support-to-alienware-16-aurora.patch
 mptcp-fix-race-condition-in-mptcp_schedule_work.patch
+mptcp-fix-a-race-in-mptcp_pm_del_add_timer.patch
+mptcp-fix-ack-generation-for-fallback-msk.patch
+mptcp-fix-duplicate-reset-on-fastclose.patch
+mptcp-fix-premature-close-in-case-of-fallback.patch
+selftests-mptcp-join-endpoints-longer-timeout.patch
+selftests-mptcp-join-userspace-longer-timeout.patch
+mptcp-avoid-unneeded-subflow-level-drops.patch
+mptcp-decouple-mptcp-fastclose-from-tcp-close.patch
+mptcp-do-not-fallback-when-ooo-is-present.patch
+drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch
+drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch
+drm-plane-fix-create_in_format_blob-return-value.patch
+drm-amd-skip-power-ungate-during-suspend-for-vpe.patch
+drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch
+drm-amd-display-increase-dpcd-read-retries.patch
+drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch
+drm-amd-display-fix-pbn-to-kbps-conversion.patch
+drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch