From: Greg Kroah-Hartman Date: Mon, 24 Nov 2025 13:58:39 +0000 (+0100) Subject: 6.12-stable patches X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1bf99768545ac5aeea33d81b0f69c95e9392cfc6;p=thirdparty%2Fkernel%2Fstable-queue.git 6.12-stable patches added patches: drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch drm-amd-display-fix-pbn-to-kbps-conversion.patch drm-amd-display-increase-dpcd-read-retries.patch drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch drm-amd-skip-power-ungate-during-suspend-for-vpe.patch drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch mptcp-avoid-unneeded-subflow-level-drops.patch mptcp-decouple-mptcp-fastclose-from-tcp-close.patch mptcp-do-not-fallback-when-ooo-is-present.patch mptcp-fix-ack-generation-for-fallback-msk.patch mptcp-fix-duplicate-reset-on-fastclose.patch mptcp-fix-premature-close-in-case-of-fallback.patch selftests-mptcp-join-endpoints-longer-timeout.patch selftests-mptcp-join-userspace-longer-timeout.patch --- diff --git a/queue-6.12/drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch b/queue-6.12/drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch new file mode 100644 index 0000000000..62acd2e648 --- /dev/null +++ b/queue-6.12/drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch @@ -0,0 +1,49 @@ +From 5bab4c89390f32b2f491f49a151948cd226dd909 Mon Sep 17 00:00:00 2001 +From: Ivan Lipski +Date: Wed, 5 Nov 2025 15:27:42 -0500 +Subject: drm/amd/display: Clear the CUR_ENABLE register on DCN20 on DPP5 + +From: Ivan Lipski + +commit 5bab4c89390f32b2f491f49a151948cd226dd909 upstream. + +[Why] +On DCN20 & DCN30, the 6th DPP's & HUBP's are powered on permanently and +cannot be power gated. 
Thus, when dpp_reset() is invoked for the DPP5, +while it's still powered on, the cached cursor_state +(dpp_base->pos.cur0_ctl.bits.cur0_enable) +and the actual state (CUR0_ENABLE) bit are unsycned. This can cause a +double cursor in full screen with non-native scaling. + +[How] +Force disable cursor on DPP5 on plane powerdown for ASICs w/ 6 DPPs/HUBPs. + +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4673 +Reviewed-by: Aric Cyr +Signed-off-by: Ivan Lipski +Tested-by: Dan Wheeler +Signed-off-by: Alex Deucher +(cherry picked from commit 79b3c037f972dcb13e325a8eabfb8da835764e15) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +@@ -604,6 +604,14 @@ void dcn20_dpp_pg_control( + * DOMAIN11_PGFSM_PWR_STATUS, pwr_status, + * 1, 1000); + */ ++ ++ /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */ ++ if (!power_on) { ++ struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst]; ++ if (dpp5 && dpp5->funcs->dpp_force_disable_cursor) ++ dpp5->funcs->dpp_force_disable_cursor(dpp5); ++ } ++ + break; + default: + BREAK_TO_DEBUGGER(); diff --git a/queue-6.12/drm-amd-display-fix-pbn-to-kbps-conversion.patch b/queue-6.12/drm-amd-display-fix-pbn-to-kbps-conversion.patch new file mode 100644 index 0000000000..2c00bd45e0 --- /dev/null +++ b/queue-6.12/drm-amd-display-fix-pbn-to-kbps-conversion.patch @@ -0,0 +1,193 @@ +From 1788ef30725da53face7e311cdf62ad65fababcd Mon Sep 17 00:00:00 2001 +From: Fangzhi Zuo +Date: Fri, 7 Nov 2025 15:01:30 -0500 +Subject: drm/amd/display: Fix pbn to kbps Conversion + +From: Fangzhi Zuo + +commit 1788ef30725da53face7e311cdf62ad65fababcd upstream. + +[Why] +Existing routine has two conversion sequence, +pbn_to_kbps and kbps_to_pbn with margin. 
+Non of those has without-margin calculation. + +kbps_to_pbn with margin conversion includes +fec overhead which has already been included in +pbn_div calculation with 0.994 factor considered. +It is a double counted fec overhead factor that causes +potential bw loss. + +[How] +Add without-margin calculation. +Fix fec overhead double counted issue. + +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3735 +Reviewed-by: Aurabindo Pillai +Signed-off-by: Fangzhi Zuo +Signed-off-by: Ivan Lipski +Tested-by: Dan Wheeler +Signed-off-by: Alex Deucher +(cherry picked from commit e0dec00f3d05e8c0eceaaebfdca217f8d10d380c) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 59 ++++-------- + 1 file changed, 23 insertions(+), 36 deletions(-) + +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -846,26 +846,28 @@ struct dsc_mst_fairness_params { + }; + + #if defined(CONFIG_DRM_AMD_DC_FP) +-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) ++static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn) + { +- u8 link_coding_cap; +- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; ++ uint64_t effective_kbps = (uint64_t)kbps; + +- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); +- if (link_coding_cap == DP_128b_132b_ENCODING) +- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; ++ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps ++ effective_kbps *= 1006; ++ effective_kbps = div_u64(effective_kbps, 1000); ++ } + +- return fec_overhead_multiplier_x1000; ++ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000)); + } + +-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) ++static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin) + { +- u64 peak_kbps = 
kbps; ++ uint64_t pbn_effective = (uint64_t)pbn; ++ ++ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn ++ pbn_effective *= (1000000 / PEAK_FACTOR_X1000); ++ else ++ pbn_effective *= 1000; + +- peak_kbps *= 1006; +- peak_kbps *= fec_overhead_multiplier_x1000; +- peak_kbps = div_u64(peak_kbps, 1000 * 1000); +- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); ++ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64); + } + + static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, +@@ -936,7 +938,7 @@ static int bpp_x16_from_pbn(struct dsc_m + dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; + +- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); ++ kbps = pbn_to_kbps(pbn, false); + dc_dsc_compute_config( + param.sink->ctx->dc->res_pool->dscs[0], + ¶m.sink->dsc_caps.dsc_dec_caps, +@@ -965,12 +967,11 @@ static int increase_dsc_bpp(struct drm_a + int link_timeslots_used; + int fair_pbn_alloc; + int ret = 0; +- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + + for (i = 0; i < count; i++) { + if (vars[i + k].dsc_enabled) { + initial_slack[i] = +- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; ++ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn; + bpp_increased[i] = false; + remaining_to_increase += 1; + } else { +@@ -1066,7 +1067,6 @@ static int try_disable_dsc(struct drm_at + int next_index; + int remaining_to_try = 0; + int ret; +- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + int var_pbn; + + for (i = 0; i < count; i++) { +@@ -1099,7 +1099,7 @@ static int try_disable_dsc(struct drm_at + + DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); + var_pbn = vars[next_index].pbn; +- vars[next_index].pbn = 
kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ++ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true); + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, +@@ -1159,7 +1159,6 @@ static int compute_mst_dsc_configs_for_l + int count = 0; + int i, k, ret; + bool debugfs_overwrite = false; +- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + struct drm_connector_state *new_conn_state; + + memset(params, 0, sizeof(params)); +@@ -1240,7 +1239,7 @@ static int compute_mst_dsc_configs_for_l + DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); + for (i = 0; i < count; i++) { + vars[i + k].aconnector = params[i].aconnector; +- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ++ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, +@@ -1262,7 +1261,7 @@ static int compute_mst_dsc_configs_for_l + DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); + for (i = 0; i < count; i++) { + if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { +- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); ++ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false); + vars[i + k].dsc_enabled = true; + vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, +@@ -1270,7 +1269,7 @@ static int compute_mst_dsc_configs_for_l + if (ret < 0) + return ret; + } else { +- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ++ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); + vars[i + k].dsc_enabled = false; + vars[i + 
k].bpp_x16 = 0; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, +@@ -1722,18 +1721,6 @@ clean_exit: + return ret; + } + +-static uint32_t kbps_from_pbn(unsigned int pbn) +-{ +- uint64_t kbps = (uint64_t)pbn; +- +- kbps *= (1000000 / PEAK_FACTOR_X1000); +- kbps *= 8; +- kbps *= 54; +- kbps /= 64; +- +- return (uint32_t)kbps; +-} +- + static bool is_dsc_common_config_possible(struct dc_stream_state *stream, + struct dc_dsc_bw_range *bw_range) + { +@@ -1825,7 +1812,7 @@ enum dc_status dm_dp_mst_is_port_support + dc_link_get_highest_encoding_format(stream->link)); + cur_link_settings = stream->link->verified_link_cap; + root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); +- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); ++ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true); + + /* pick the end to end bw bottleneck */ + end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); +@@ -1876,7 +1863,7 @@ enum dc_status dm_dp_mst_is_port_support + immediate_upstream_port = aconnector->mst_output_port->parent->port_parent; + + if (immediate_upstream_port) { +- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); ++ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true); + virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); + } else { + /* For topology LCT 1 case - only one mstb*/ diff --git a/queue-6.12/drm-amd-display-increase-dpcd-read-retries.patch b/queue-6.12/drm-amd-display-increase-dpcd-read-retries.patch new file mode 100644 index 0000000000..40f755075b --- /dev/null +++ b/queue-6.12/drm-amd-display-increase-dpcd-read-retries.patch @@ -0,0 +1,41 @@ +From 8612badc331bcab2068baefa69e1458085ed89e3 Mon Sep 17 00:00:00 2001 +From: "Mario Limonciello (AMD)" +Date: Mon, 3 Nov 2025 12:11:31 -0600 +Subject: drm/amd/display: Increase DPCD read retries 
+ +From: Mario Limonciello (AMD) + +commit 8612badc331bcab2068baefa69e1458085ed89e3 upstream. + +[Why] +Empirical measurement of some monitors that fail to read EDID while +booting shows that the number of retries with a 30ms delay between +tries is as high as 16. + +[How] +Increase number of retries to 20. + +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4672 +Reviewed-by: Alex Hung +Signed-off-by: Mario Limonciello (AMD) +Signed-off-by: Ivan Lipski +Tested-by: Dan Wheeler +Signed-off-by: Alex Deucher +(cherry picked from commit ad1c59ad7cf74ec06e32fe2c330ac1e957222288) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +@@ -1587,7 +1587,7 @@ static bool retrieve_link_cap(struct dc_ + union edp_configuration_cap edp_config_cap; + union dp_downstream_port_present ds_port = { 0 }; + enum dc_status status = DC_ERROR_UNEXPECTED; +- uint32_t read_dpcd_retry_cnt = 3; ++ uint32_t read_dpcd_retry_cnt = 20; + int i; + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + const uint32_t post_oui_delay = 30; // 30ms diff --git a/queue-6.12/drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch b/queue-6.12/drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch new file mode 100644 index 0000000000..3c91cb2a73 --- /dev/null +++ b/queue-6.12/drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch @@ -0,0 +1,56 @@ +From 71ad9054c1f241be63f9d11df8cbd0aa0352fe16 Mon Sep 17 00:00:00 2001 +From: "Mario Limonciello (AMD)" +Date: Mon, 3 Nov 2025 11:17:44 -0600 +Subject: drm/amd/display: Move sleep into each retry for retrieve_link_cap() + +From: Mario Limonciello (AMD) + +commit 71ad9054c1f241be63f9d11df8cbd0aa0352fe16 upstream. 
+ +[Why] +When a monitor is booting it's possible that it isn't ready to retrieve +link caps and this can lead to an EDID read failure: + +``` +[drm:retrieve_link_cap [amdgpu]] *ERROR* retrieve_link_cap: Read receiver caps dpcd data failed. +amdgpu 0000:c5:00.0: [drm] *ERROR* No EDID read. +``` + +[How] +Rather than msleep once and try a few times, msleep each time. Should +be no changes for existing working monitors, but should correct reading +caps on a monitor that is slow to boot. + +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4672 +Reviewed-by: Alex Hung +Signed-off-by: Mario Limonciello (AMD) +Signed-off-by: Ivan Lipski +Tested-by: Dan Wheeler +Signed-off-by: Alex Deucher +(cherry picked from commit 669dca37b3348a447db04bbdcbb3def94d5997cc) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c ++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +@@ -1633,12 +1633,13 @@ static bool retrieve_link_cap(struct dc_ + status = dpcd_get_tunneling_device_data(link); + + dpcd_set_source_specific_data(link); +- /* Sink may need to configure internals based on vendor, so allow some +- * time before proceeding with possibly vendor specific transactions +- */ +- msleep(post_oui_delay); + + for (i = 0; i < read_dpcd_retry_cnt; i++) { ++ /* ++ * Sink may need to configure internals based on vendor, so allow some ++ * time before proceeding with possibly vendor specific transactions ++ */ ++ msleep(post_oui_delay); + status = core_link_read_dpcd( + link, + DP_DPCD_REV, diff --git a/queue-6.12/drm-amd-skip-power-ungate-during-suspend-for-vpe.patch b/queue-6.12/drm-amd-skip-power-ungate-during-suspend-for-vpe.patch new file mode 100644 index 0000000000..038d2cd35b --- /dev/null +++ 
b/queue-6.12/drm-amd-skip-power-ungate-during-suspend-for-vpe.patch @@ -0,0 +1,42 @@ +From 31ab31433c9bd2f255c48dc6cb9a99845c58b1e4 Mon Sep 17 00:00:00 2001 +From: Mario Limonciello +Date: Tue, 18 Nov 2025 07:18:10 -0600 +Subject: drm/amd: Skip power ungate during suspend for VPE + +From: Mario Limonciello + +commit 31ab31433c9bd2f255c48dc6cb9a99845c58b1e4 upstream. + +During the suspend sequence VPE is already going to be power gated +as part of vpe_suspend(). It's unnecessary to call during calls to +amdgpu_device_set_pg_state(). + +It actually can expose a race condition with the firmware if s0i3 +sequence starts as well. Drop these calls. + +Cc: Peyton.Lee@amd.com +Reviewed-by: Alex Deucher +Signed-off-by: Mario Limonciello +Signed-off-by: Alex Deucher +(cherry picked from commit 2a6c826cfeedd7714611ac115371a959ead55bda) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -3090,10 +3090,11 @@ int amdgpu_device_set_pg_state(struct am + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) + continue; +- /* skip CG for VCE/UVD, it's handled specially */ ++ /* skip CG for VCE/UVD/VPE, it's handled specially */ + if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && ++ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && + adev->ip_blocks[i].version->funcs->set_powergating_state) { + /* enable powergating to save power */ diff --git a/queue-6.12/drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch 
b/queue-6.12/drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch new file mode 100644 index 0000000000..0e8c8bf9e3 --- /dev/null +++ b/queue-6.12/drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch @@ -0,0 +1,43 @@ +From 80d8a9ad1587b64c545d515ab6cb7ecb9908e1b3 Mon Sep 17 00:00:00 2001 +From: Yifan Zha +Date: Fri, 14 Nov 2025 17:48:58 +0800 +Subject: drm/amdgpu: Skip emit de meta data on gfx11 with rs64 enabled + +From: Yifan Zha + +commit 80d8a9ad1587b64c545d515ab6cb7ecb9908e1b3 upstream. + +[Why] +Accoreding to CP updated to RS64 on gfx11, +WRITE_DATA with PREEMPTION_META_MEMORY(dst_sel=8) is illegal for CP FW. +That packet is used for MCBP on F32 based system. +So it would lead to incorrect GRBM write and FW is not handling that +extra case correctly. + +[How] +With gfx11 rs64 enabled, skip emit de meta data. + +Signed-off-by: Yifan Zha +Acked-by: Alex Deucher +Signed-off-by: Alex Deucher +(cherry picked from commit 8366cd442d226463e673bed5d199df916f4ecbcf) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +@@ -5642,9 +5642,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(s + if (flags & AMDGPU_IB_PREEMPTED) + control |= INDIRECT_BUFFER_PRE_RESUME(1); + +- if (vmid) ++ if (vmid && !ring->adev->gfx.rs64_enable) + gfx_v11_0_ring_emit_de_meta(ring, +- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? 
true : false); ++ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED)); + } + + if (ring->is_mes_queue) diff --git a/queue-6.12/drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch b/queue-6.12/drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch new file mode 100644 index 0000000000..56c5924229 --- /dev/null +++ b/queue-6.12/drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch @@ -0,0 +1,46 @@ +From 9eb00b5f5697bd56baa3222c7a1426fa15bacfb5 Mon Sep 17 00:00:00 2001 +From: Robert McClinton +Date: Sun, 16 Nov 2025 12:33:21 -0500 +Subject: drm/radeon: delete radeon_fence_process in is_signaled, no deadlock + +From: Robert McClinton + +commit 9eb00b5f5697bd56baa3222c7a1426fa15bacfb5 upstream. + +Delete the attempt to progress the queue when checking if fence is +signaled. This avoids deadlock. + +dma-fence_ops::signaled can be called with the fence lock in unknown +state. For radeon, the fence lock is also the wait queue lock. This can +cause a self deadlock when signaled() tries to make forward progress on +the wait queue. But advancing the queue is unneeded because incorrectly +returning false from signaled() is perfectly acceptable. 
+ +Link: https://github.com/brave/brave-browser/issues/49182 +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4641 +Cc: Alex Deucher +Signed-off-by: Robert McClinton +Signed-off-by: Alex Deucher +(cherry picked from commit 527ba26e50ec2ca2be9c7c82f3ad42998a75d0db) +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/radeon/radeon_fence.c | 7 ------- + 1 file changed, 7 deletions(-) + +--- a/drivers/gpu/drm/radeon/radeon_fence.c ++++ b/drivers/gpu/drm/radeon/radeon_fence.c +@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(str + if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) + return true; + +- if (down_read_trylock(&rdev->exclusive_lock)) { +- radeon_fence_process(rdev, ring); +- up_read(&rdev->exclusive_lock); +- +- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) +- return true; +- } + return false; + } + diff --git a/queue-6.12/drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch b/queue-6.12/drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch new file mode 100644 index 0000000000..0c957ded70 --- /dev/null +++ b/queue-6.12/drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch @@ -0,0 +1,37 @@ +From 4c5376b4b143c4834ebd392aef2215847752b16a Mon Sep 17 00:00:00 2001 +From: Ma Ke +Date: Wed, 22 Oct 2025 19:47:20 +0800 +Subject: drm/tegra: dc: Fix reference leak in tegra_dc_couple() + +From: Ma Ke + +commit 4c5376b4b143c4834ebd392aef2215847752b16a upstream. + +driver_find_device() calls get_device() to increment the reference +count once a matching device is found, but there is no put_device() to +balance the reference count. To avoid reference count leakage, add +put_device() to decrease the reference count. + +Found by code review. 
+ +Cc: stable@vger.kernel.org +Fixes: a31500fe7055 ("drm/tegra: dc: Restore coupling of display controllers") +Signed-off-by: Ma Ke +Acked-by: Mikko Perttunen +Signed-off-by: Thierry Reding +Link: https://patch.msgid.link/20251022114720.24937-1-make24@iscas.ac.cn +Signed-off-by: Greg Kroah-Hartman +--- + drivers/gpu/drm/tegra/dc.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/gpu/drm/tegra/dc.c ++++ b/drivers/gpu/drm/tegra/dc.c +@@ -3147,6 +3147,7 @@ static int tegra_dc_couple(struct tegra_ + dc->client.parent = &parent->client; + + dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion)); ++ put_device(companion); + } + + return 0; diff --git a/queue-6.12/mptcp-avoid-unneeded-subflow-level-drops.patch b/queue-6.12/mptcp-avoid-unneeded-subflow-level-drops.patch new file mode 100644 index 0000000000..eb6ff0bc43 --- /dev/null +++ b/queue-6.12/mptcp-avoid-unneeded-subflow-level-drops.patch @@ -0,0 +1,116 @@ +From 4f102d747cadd8f595f2b25882eed9bec1675fb1 Mon Sep 17 00:00:00 2001 +From: Paolo Abeni +Date: Tue, 18 Nov 2025 08:20:20 +0100 +Subject: mptcp: avoid unneeded subflow-level drops + +From: Paolo Abeni + +commit 4f102d747cadd8f595f2b25882eed9bec1675fb1 upstream. + +The rcv window is shared among all the subflows. Currently, MPTCP sync +the TCP-level rcv window with the MPTCP one at tcp_transmit_skb() time. + +The above means that incoming data may sporadically observe outdated +TCP-level rcv window and being wrongly dropped by TCP. + +Address the issue checking for the edge condition before queuing the +data at TCP level, and eventually syncing the rcv window as needed. + +Note that the issue is actually present from the very first MPTCP +implementation, but backports older than the blamed commit below will +range from impossible to useless. 
+ +Before: + + $ nstat -n; sleep 1; nstat -z TcpExtBeyondWindow + TcpExtBeyondWindow 14 0.0 + +After: + + $ nstat -n; sleep 1; nstat -z TcpExtBeyondWindow + TcpExtBeyondWindow 0 0.0 + +Fixes: fa3fe2b15031 ("mptcp: track window announced to peer") +Cc: stable@vger.kernel.org +Signed-off-by: Paolo Abeni +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-2-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/options.c | 31 +++++++++++++++++++++++++++++++ + net/mptcp/protocol.h | 1 + + 2 files changed, 32 insertions(+) + +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -1044,6 +1044,31 @@ static void __mptcp_snd_una_update(struc + WRITE_ONCE(msk->snd_una, new_snd_una); + } + ++static void rwin_update(struct mptcp_sock *msk, struct sock *ssk, ++ struct sk_buff *skb) ++{ ++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); ++ struct tcp_sock *tp = tcp_sk(ssk); ++ u64 mptcp_rcv_wnd; ++ ++ /* Avoid touching extra cachelines if TCP is going to accept this ++ * skb without filling the TCP-level window even with a possibly ++ * outdated mptcp-level rwin. ++ */ ++ if (!skb->len || skb->len < tcp_receive_window(tp)) ++ return; ++ ++ mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent); ++ if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent)) ++ return; ++ ++ /* Some other subflow grew the mptcp-level rwin since rcv_wup, ++ * resync. 
++ */ ++ tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent; ++ subflow->rcv_wnd_sent = mptcp_rcv_wnd; ++} ++ + static void ack_update_msk(struct mptcp_sock *msk, + struct sock *ssk, + struct mptcp_options_received *mp_opt) +@@ -1211,6 +1236,7 @@ bool mptcp_incoming_options(struct sock + */ + if (mp_opt.use_ack) + ack_update_msk(msk, sk, &mp_opt); ++ rwin_update(msk, sk, skb); + + /* Zero-data-length packets are dropped by the caller and not + * propagated to the MPTCP layer, so the skb extension does not +@@ -1297,6 +1323,10 @@ static void mptcp_set_rwin(struct tcp_so + + if (rcv_wnd_new != rcv_wnd_old) { + raise_win: ++ /* The msk-level rcv wnd is after the tcp level one, ++ * sync the latter. ++ */ ++ rcv_wnd_new = rcv_wnd_old; + win = rcv_wnd_old - ack_seq; + tp->rcv_wnd = min_t(u64, win, U32_MAX); + new_win = tp->rcv_wnd; +@@ -1320,6 +1350,7 @@ raise_win: + + update_wspace: + WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); ++ subflow->rcv_wnd_sent = rcv_wnd_new; + } + + static void mptcp_track_rwin(struct tcp_sock *tp) +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -508,6 +508,7 @@ struct mptcp_subflow_context { + u64 remote_key; + u64 idsn; + u64 map_seq; ++ u64 rcv_wnd_sent; + u32 snd_isn; + u32 token; + u32 rel_write_seq; diff --git a/queue-6.12/mptcp-decouple-mptcp-fastclose-from-tcp-close.patch b/queue-6.12/mptcp-decouple-mptcp-fastclose-from-tcp-close.patch new file mode 100644 index 0000000000..67dade32cc --- /dev/null +++ b/queue-6.12/mptcp-decouple-mptcp-fastclose-from-tcp-close.patch @@ -0,0 +1,96 @@ +From fff0c87996672816a84c3386797a5e69751c5888 Mon Sep 17 00:00:00 2001 +From: Paolo Abeni +Date: Tue, 18 Nov 2025 08:20:23 +0100 +Subject: mptcp: decouple mptcp fastclose from tcp close + +From: Paolo Abeni + +commit fff0c87996672816a84c3386797a5e69751c5888 upstream. + +With the current fastclose implementation, the mptcp_do_fastclose() +helper is in charge of two distinct actions: send the fastclose reset +and cleanup the subflows. 
+ +Formally decouple the two steps, ensuring that mptcp explicitly closes +all the subflows after the mentioned helper. + +This will make the upcoming fix simpler, and allows dropping the 2nd +argument from mptcp_destroy_common(). The Fixes tag is then the same as +in the next commit to help with the backports. + +Fixes: d21f83485518 ("mptcp: use fastclose on more edge scenarios") +Cc: stable@vger.kernel.org +Signed-off-by: Paolo Abeni +Reviewed-by: Geliang Tang +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-5-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/protocol.c | 13 +++++++++---- + net/mptcp/protocol.h | 2 +- + 2 files changed, 10 insertions(+), 5 deletions(-) + +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -2869,7 +2869,11 @@ static void mptcp_worker(struct work_str + __mptcp_close_subflow(sk); + + if (mptcp_close_tout_expired(sk)) { ++ struct mptcp_subflow_context *subflow, *tmp; ++ + mptcp_do_fastclose(sk); ++ mptcp_for_each_subflow_safe(msk, subflow, tmp) ++ __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0); + mptcp_close_wake_up(sk); + } + +@@ -3301,7 +3305,8 @@ static int mptcp_disconnect(struct sock + /* msk->subflow is still intact, the following will not free the first + * subflow + */ +- mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); ++ mptcp_do_fastclose(sk); ++ mptcp_destroy_common(msk); + + /* The first subflow is already in TCP_CLOSE status, the following + * can't overlap with a fallback anymore +@@ -3483,7 +3488,7 @@ void mptcp_rcv_space_init(struct mptcp_s + msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; + } + +-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) ++void mptcp_destroy_common(struct mptcp_sock *msk) + { + struct mptcp_subflow_context *subflow, *tmp; + struct sock *sk = (struct sock *)msk; +@@ -3492,7 +3497,7 @@ void 
mptcp_destroy_common(struct mptcp_s + + /* join list will be eventually flushed (with rst) at sock lock release time */ + mptcp_for_each_subflow_safe(msk, subflow, tmp) +- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); ++ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0); + + /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ + mptcp_data_lock(sk); +@@ -3517,7 +3522,7 @@ static void mptcp_destroy(struct sock *s + + /* allow the following to close even the initial subflow */ + msk->free_first = 1; +- mptcp_destroy_common(msk, 0); ++ mptcp_destroy_common(msk); + sk_sockets_allocated_dec(sk); + } + +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -968,7 +968,7 @@ static inline void mptcp_propagate_sndbu + local_bh_enable(); + } + +-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags); ++void mptcp_destroy_common(struct mptcp_sock *msk); + + #define MPTCP_TOKEN_MAX_RETRIES 4 + diff --git a/queue-6.12/mptcp-do-not-fallback-when-ooo-is-present.patch b/queue-6.12/mptcp-do-not-fallback-when-ooo-is-present.patch new file mode 100644 index 0000000000..4e127807f9 --- /dev/null +++ b/queue-6.12/mptcp-do-not-fallback-when-ooo-is-present.patch @@ -0,0 +1,43 @@ +From 1bba3f219c5e8c29e63afa3c1fc24f875ebec119 Mon Sep 17 00:00:00 2001 +From: Paolo Abeni +Date: Tue, 18 Nov 2025 08:20:22 +0100 +Subject: mptcp: do not fallback when OoO is present + +From: Paolo Abeni + +commit 1bba3f219c5e8c29e63afa3c1fc24f875ebec119 upstream. + +In case of DSS corruption, the MPTCP protocol tries to avoid the subflow +reset if fallback is possible. Such corruptions happen in the receive +path; to ensure fallback is possible the stack additionally needs to +check for OoO data, otherwise the fallback will break the data stream. 
+ +Fixes: e32d262c89e2 ("mptcp: handle consistently DSS corruption") +Cc: stable@vger.kernel.org +Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/598 +Signed-off-by: Paolo Abeni +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-4-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/protocol.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -906,6 +906,13 @@ static bool __mptcp_finish_join(struct m + if (sk->sk_state != TCP_ESTABLISHED) + return false; + ++ /* The caller possibly is not holding the msk socket lock, but ++ * in the fallback case only the current subflow is touching ++ * the OoO queue. ++ */ ++ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) ++ return false; ++ + spin_lock_bh(&msk->fallback_lock); + if (!msk->allow_subflows) { + spin_unlock_bh(&msk->fallback_lock); diff --git a/queue-6.12/mptcp-fix-ack-generation-for-fallback-msk.patch b/queue-6.12/mptcp-fix-ack-generation-for-fallback-msk.patch new file mode 100644 index 0000000000..dcef1e219e --- /dev/null +++ b/queue-6.12/mptcp-fix-ack-generation-for-fallback-msk.patch @@ -0,0 +1,85 @@ +From 5e15395f6d9ec07395866c5511f4b4ac566c0c9b Mon Sep 17 00:00:00 2001 +From: Paolo Abeni +Date: Tue, 18 Nov 2025 08:20:19 +0100 +Subject: mptcp: fix ack generation for fallback msk + +From: Paolo Abeni + +commit 5e15395f6d9ec07395866c5511f4b4ac566c0c9b upstream. + +mptcp_cleanup_rbuf() needs to know the last most recent, mptcp-level +rcv_wnd sent, and such information is tracked into the msk->old_wspace +field, updated at ack transmission time by mptcp_write_options(). + +Fallback socket do not add any mptcp options, such helper is never +invoked, and msk->old_wspace value remain stale. That in turn makes +ack generation at recvmsg() time quite random. 
+ +Address the issue ensuring mptcp_write_options() is invoked even for +fallback sockets, and just update the needed info in such a case. + +The issue went unnoticed for a long time, as mptcp currently overshots +the fallback socket receive buffer autotune significantly. It is going +to change in the near future. + +Fixes: e3859603ba13 ("mptcp: better msk receive window updates") +Cc: stable@vger.kernel.org +Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/594 +Signed-off-by: Paolo Abeni +Reviewed-by: Geliang Tang +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-1-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/options.c | 23 ++++++++++++++++++++++- + 1 file changed, 22 insertions(+), 1 deletion(-) + +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -839,8 +839,11 @@ bool mptcp_established_options(struct so + + opts->suboptions = 0; + ++ /* Force later mptcp_write_options(), but do not use any actual ++ * option space. 
++ */ + if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb))) +- return false; ++ return true; + + if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) { + if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) || +@@ -1319,6 +1322,20 @@ update_wspace: + WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); + } + ++static void mptcp_track_rwin(struct tcp_sock *tp) ++{ ++ const struct sock *ssk = (const struct sock *)tp; ++ struct mptcp_subflow_context *subflow; ++ struct mptcp_sock *msk; ++ ++ if (!ssk) ++ return; ++ ++ subflow = mptcp_subflow_ctx(ssk); ++ msk = mptcp_sk(subflow->conn); ++ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); ++} ++ + __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum) + { + struct csum_pseudo_header header; +@@ -1611,6 +1628,10 @@ mp_rst: + opts->reset_transient, + opts->reset_reason); + return; ++ } else if (unlikely(!opts->suboptions)) { ++ /* Fallback to TCP */ ++ mptcp_track_rwin(tp); ++ return; + } + + if (OPTION_MPTCP_PRIO & opts->suboptions) { diff --git a/queue-6.12/mptcp-fix-duplicate-reset-on-fastclose.patch b/queue-6.12/mptcp-fix-duplicate-reset-on-fastclose.patch new file mode 100644 index 0000000000..61076cce89 --- /dev/null +++ b/queue-6.12/mptcp-fix-duplicate-reset-on-fastclose.patch @@ -0,0 +1,109 @@ +From ae155060247be8dcae3802a95bd1bdf93ab3215d Mon Sep 17 00:00:00 2001 +From: Paolo Abeni +Date: Tue, 18 Nov 2025 08:20:24 +0100 +Subject: mptcp: fix duplicate reset on fastclose + +From: Paolo Abeni + +commit ae155060247be8dcae3802a95bd1bdf93ab3215d upstream. + +The CI reports sporadic failures of the fastclose self-tests. The root +cause is a duplicate reset, not carrying the relevant MPTCP option. +In the failing scenario the bad reset is received by the peer before +the fastclose one, preventing the reception of the latter. 
+Indeed there is a window of opportunity at fastclose time for the
+- sock_set_flag(ssk, SOCK_LINGER); +- subflow->send_fastclose = 1; +- } ++ if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE) ++ tcp_set_state(ssk, TCP_CLOSE); + + need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); + if (!dispose_it) { +@@ -2829,9 +2822,26 @@ static void mptcp_do_fastclose(struct so + struct mptcp_sock *msk = mptcp_sk(sk); + + mptcp_set_state(sk, TCP_CLOSE); +- mptcp_for_each_subflow_safe(msk, subflow, tmp) +- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), +- subflow, MPTCP_CF_FASTCLOSE); ++ ++ /* Explicitly send the fastclose reset as need */ ++ if (__mptcp_check_fallback(msk)) ++ return; ++ ++ mptcp_for_each_subflow_safe(msk, subflow, tmp) { ++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++ ++ lock_sock(ssk); ++ ++ /* Some subflow socket states don't allow/need a reset.*/ ++ if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) ++ goto unlock; ++ ++ subflow->send_fastclose = 1; ++ tcp_send_active_reset(ssk, ssk->sk_allocation, ++ SK_RST_REASON_TCP_ABORT_ON_CLOSE); ++unlock: ++ release_sock(ssk); ++ } + } + + static void mptcp_worker(struct work_struct *work) diff --git a/queue-6.12/mptcp-fix-premature-close-in-case-of-fallback.patch b/queue-6.12/mptcp-fix-premature-close-in-case-of-fallback.patch new file mode 100644 index 0000000000..133b38a20a --- /dev/null +++ b/queue-6.12/mptcp-fix-premature-close-in-case-of-fallback.patch @@ -0,0 +1,60 @@ +From 17393fa7b7086664be519e7230cb6ed7ec7d9462 Mon Sep 17 00:00:00 2001 +From: Paolo Abeni +Date: Tue, 18 Nov 2025 08:20:21 +0100 +Subject: mptcp: fix premature close in case of fallback + +From: Paolo Abeni + +commit 17393fa7b7086664be519e7230cb6ed7ec7d9462 upstream. + +I'm observing very frequent self-tests failures in case of fallback when +running on a CONFIG_PREEMPT kernel. + +The root cause is that subflow_sched_work_if_closed() closes any subflow +as soon as it is half-closed and has no incoming data pending. 
+ +That works well for regular subflows - MPTCP needs bi-directional +connectivity to operate on a given subflow - but for fallback socket is +race prone. + +When TCP peer closes the connection before the MPTCP one, +subflow_sched_work_if_closed() will schedule the MPTCP worker to +gracefully close the subflow, and shortly after will do another schedule +to inject and process a dummy incoming DATA_FIN. + +On CONFIG_PREEMPT kernel, the MPTCP worker can kick-in and close the +fallback subflow before subflow_sched_work_if_closed() is able to create +the dummy DATA_FIN, unexpectedly interrupting the transfer. + +Address the issue explicitly avoiding closing fallback subflows on when +the peer is only half-closed. + +Note that, when the subflow is able to create the DATA_FIN before the +worker invocation, the worker will change the msk state before trying to +close the subflow and will skip the latter operation as the msk will not +match anymore the precondition in __mptcp_close_subflow(). + +Fixes: f09b0ad55a11 ("mptcp: close subflow when receiving TCP+FIN") +Cc: stable@vger.kernel.org +Signed-off-by: Paolo Abeni +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-3-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + net/mptcp/protocol.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -2615,7 +2615,8 @@ static void __mptcp_close_subflow(struct + + if (ssk_state != TCP_CLOSE && + (ssk_state != TCP_CLOSE_WAIT || +- inet_sk_state_load(sk) != TCP_ESTABLISHED)) ++ inet_sk_state_load(sk) != TCP_ESTABLISHED || ++ __mptcp_check_fallback(msk))) + continue; + + /* 'subflow_data_ready' will re-sched once rx queue is empty */ diff --git a/queue-6.12/selftests-mptcp-join-endpoints-longer-timeout.patch b/queue-6.12/selftests-mptcp-join-endpoints-longer-timeout.patch new 
file mode 100644 index 0000000000..8ac7aa9921 --- /dev/null +++ b/queue-6.12/selftests-mptcp-join-endpoints-longer-timeout.patch @@ -0,0 +1,75 @@ +From fb13c6bb810ca871964e062cf91882d1c83db509 Mon Sep 17 00:00:00 2001 +From: "Matthieu Baerts (NGI0)" +Date: Tue, 18 Nov 2025 08:20:26 +0100 +Subject: selftests: mptcp: join: endpoints: longer timeout + +From: Matthieu Baerts (NGI0) + +commit fb13c6bb810ca871964e062cf91882d1c83db509 upstream. + +In rare cases, when the test environment is very slow, some endpoints +tests can fail because some expected events have not been seen. + +Because the tests are expecting a long on-going connection, and they are +not waiting for the end of the transfer, it is fine to have a longer +timeout, and even go over the default one. This connection will be +killed at the end, after the verifications: increasing the timeout +doesn't change anything, apart from avoiding it to end before the end of +the verifications. + +To play it safe, all endpoints tests not waiting for the end of the +transfer are now having a longer timeout: 2 minutes. + +The Fixes commit was making the connection longer, but still, the +default timeout would have stopped it after 1 minute, which might not be +enough in very slow environments. 
+ +Fixes: 6457595db987 ("selftests: mptcp: join: endpoints: longer transfer") +Cc: stable@vger.kernel.org +Signed-off-by: Matthieu Baerts (NGI0) +Reviewed-by: Geliang Tang +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-8-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/net/mptcp/mptcp_join.sh | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -3728,7 +3728,7 @@ endpoint_tests() + pm_nl_set_limits $ns1 2 2 + pm_nl_set_limits $ns2 2 2 + pm_nl_add_endpoint $ns1 10.0.2.1 flags signal +- { test_linkfail=128 speed=slow \ ++ { timeout_test=120 test_linkfail=128 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + +@@ -3755,7 +3755,7 @@ endpoint_tests() + pm_nl_set_limits $ns2 0 3 + pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow + pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + +@@ -3833,7 +3833,7 @@ endpoint_tests() + # broadcast IP: no packet for this address will be received on ns1 + pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal + pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + +@@ -3906,7 +3906,7 @@ endpoint_tests() + # broadcast IP: no packet for this address will be received on ns1 + pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal + pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow +- { test_linkfail=128 speed=20 \ ++ { timeout_test=120 test_linkfail=128 speed=20 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! 
+ diff --git a/queue-6.12/selftests-mptcp-join-userspace-longer-timeout.patch b/queue-6.12/selftests-mptcp-join-userspace-longer-timeout.patch new file mode 100644 index 0000000000..246fb60950 --- /dev/null +++ b/queue-6.12/selftests-mptcp-join-userspace-longer-timeout.patch @@ -0,0 +1,84 @@ +From 0e4ec14dc1ee4b1ec347729c225c3ca950f2bcf6 Mon Sep 17 00:00:00 2001 +From: "Matthieu Baerts (NGI0)" +Date: Tue, 18 Nov 2025 08:20:27 +0100 +Subject: selftests: mptcp: join: userspace: longer timeout + +From: Matthieu Baerts (NGI0) + +commit 0e4ec14dc1ee4b1ec347729c225c3ca950f2bcf6 upstream. + +In rare cases, when the test environment is very slow, some userspace +tests can fail because some expected events have not been seen. + +Because the tests are expecting a long on-going connection, and they are +not waiting for the end of the transfer, it is fine to have a longer +timeout, and even go over the default one. This connection will be +killed at the end, after the verifications: increasing the timeout +doesn't change anything, apart from avoiding it to end before the end of +the verifications. + +To play it safe, all userspace tests not waiting for the end of the +transfer are now having a longer timeout: 2 minutes. + +The Fixes commit was making the connection longer, but still, the +default timeout would have stopped it after 1 minute, which might not be +enough in very slow environments. 
+ +Fixes: 290493078b96 ("selftests: mptcp: join: userspace: longer transfer") +Cc: stable@vger.kernel.org +Signed-off-by: Matthieu Baerts (NGI0) +Reviewed-by: Geliang Tang +Link: https://patch.msgid.link/20251118-net-mptcp-misc-fixes-6-18-rc6-v1-9-806d3781c95f@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/net/mptcp/mptcp_join.sh | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -3591,7 +3591,7 @@ userspace_tests() + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then + set_userspace_pm $ns1 + pm_nl_set_limits $ns2 2 2 +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + wait_mpj $ns1 +@@ -3624,7 +3624,7 @@ userspace_tests() + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then + set_userspace_pm $ns2 + pm_nl_set_limits $ns1 0 1 +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + wait_mpj $ns2 +@@ -3652,7 +3652,7 @@ userspace_tests() + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then + set_userspace_pm $ns2 + pm_nl_set_limits $ns1 0 1 +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + wait_mpj $ns2 +@@ -3673,7 +3673,7 @@ userspace_tests() + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then + set_userspace_pm $ns2 + pm_nl_set_limits $ns1 0 1 +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! 
+ wait_mpj $ns2 +@@ -3697,7 +3697,7 @@ userspace_tests() + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then + set_userspace_pm $ns1 + pm_nl_set_limits $ns2 1 1 +- { test_linkfail=128 speed=5 \ ++ { timeout_test=120 test_linkfail=128 speed=5 \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + wait_mpj $ns1 diff --git a/queue-6.12/series b/queue-6.12/series index a3c76df299..e5868fa302 100644 --- a/queue-6.12/series +++ b/queue-6.12/series @@ -34,3 +34,19 @@ mips-malta-fix-eva-soc-it-pci-mmio.patch dt-bindings-pinctrl-toshiba-visconti-fix-number-of-items-in-groups.patch loongarch-don-t-panic-if-no-valid-cache-info-for-pci.patch mptcp-fix-race-condition-in-mptcp_schedule_work.patch +mptcp-fix-ack-generation-for-fallback-msk.patch +mptcp-fix-duplicate-reset-on-fastclose.patch +mptcp-fix-premature-close-in-case-of-fallback.patch +selftests-mptcp-join-endpoints-longer-timeout.patch +selftests-mptcp-join-userspace-longer-timeout.patch +mptcp-avoid-unneeded-subflow-level-drops.patch +mptcp-decouple-mptcp-fastclose-from-tcp-close.patch +mptcp-do-not-fallback-when-ooo-is-present.patch +drm-tegra-dc-fix-reference-leak-in-tegra_dc_couple.patch +drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch +drm-amd-skip-power-ungate-during-suspend-for-vpe.patch +drm-amdgpu-skip-emit-de-meta-data-on-gfx11-with-rs64-enabled.patch +drm-amd-display-increase-dpcd-read-retries.patch +drm-amd-display-move-sleep-into-each-retry-for-retrieve_link_cap.patch +drm-amd-display-fix-pbn-to-kbps-conversion.patch +drm-amd-display-clear-the-cur_enable-register-on-dcn20-on-dpp5.patch