6.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 30 Aug 2024 10:37:06 +0000 (12:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 30 Aug 2024 10:37:06 +0000 (12:37 +0200)
added patches:
binfmt_elf_fdpic-fix-auxv-size-calculation-when-elf_hwcap2-is-defined.patch
drm-amdgpu-align-pp_power_profile_mode-with-kernel-docs.patch
drm-amdgpu-swsmu-always-force-a-state-reprogram-on-init.patch
drm-i915-dp_mst-fix-mst-state-after-a-sink-reset.patch
drm-i915-dsi-make-lenovo-yoga-tab-3-x90f-dmi-match-less-strict.patch
drm-v3d-disable-preemption-while-updating-gpu-stats.patch
drm-vmwgfx-disable-coherent-dumb-buffers-without-3d.patch
drm-vmwgfx-fix-prime-with-external-buffers.patch
drm-vmwgfx-prevent-unmapping-active-read-buffers.patch
mptcp-avoid-duplicated-sub_closed-events.patch
mptcp-close-subflow-when-receiving-tcp-fin.patch
mptcp-pm-add_addr-0-is-not-a-new-address.patch
mptcp-pm-do-not-remove-already-closed-subflows.patch
mptcp-pm-fix-id-0-endp-usage-after-multiple-re-creations.patch
mptcp-pm-fix-rm_addr-id-for-the-initial-subflow.patch
mptcp-pm-reset-mpc-endp-id-when-re-added.patch
mptcp-pm-reuse-id-0-after-delete-and-re-add.patch
mptcp-pm-send-ack-on-an-active-subflow.patch
mptcp-pm-skip-connecting-to-already-established-sf.patch
mptcp-pr_debug-add-missing-n-at-the-end.patch
mptcp-sched-check-both-backup-in-retrans.patch
net-mana-fix-race-of-mana_hwc_post_rx_wqe-and-new-hwc-response.patch
selftests-mptcp-join-cannot-rm-sf-if-closed.patch
selftests-mptcp-join-check-re-re-adding-id-0-endp.patch
selftests-mptcp-join-check-removing-id-0-endpoint.patch
selftests-mptcp-join-no-extra-msg-if-no-counter.patch
wifi-mwifiex-duplicate-static-structs-used-in-driver-instances.patch
wifi-wfx-repair-open-network-ap-mode.patch

29 files changed:
queue-6.10/binfmt_elf_fdpic-fix-auxv-size-calculation-when-elf_hwcap2-is-defined.patch [new file with mode: 0644]
queue-6.10/drm-amdgpu-align-pp_power_profile_mode-with-kernel-docs.patch [new file with mode: 0644]
queue-6.10/drm-amdgpu-swsmu-always-force-a-state-reprogram-on-init.patch [new file with mode: 0644]
queue-6.10/drm-i915-dp_mst-fix-mst-state-after-a-sink-reset.patch [new file with mode: 0644]
queue-6.10/drm-i915-dsi-make-lenovo-yoga-tab-3-x90f-dmi-match-less-strict.patch [new file with mode: 0644]
queue-6.10/drm-v3d-disable-preemption-while-updating-gpu-stats.patch [new file with mode: 0644]
queue-6.10/drm-vmwgfx-disable-coherent-dumb-buffers-without-3d.patch [new file with mode: 0644]
queue-6.10/drm-vmwgfx-fix-prime-with-external-buffers.patch [new file with mode: 0644]
queue-6.10/drm-vmwgfx-prevent-unmapping-active-read-buffers.patch [new file with mode: 0644]
queue-6.10/mptcp-avoid-duplicated-sub_closed-events.patch [new file with mode: 0644]
queue-6.10/mptcp-close-subflow-when-receiving-tcp-fin.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-add_addr-0-is-not-a-new-address.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-do-not-remove-already-closed-subflows.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-fix-id-0-endp-usage-after-multiple-re-creations.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-fix-rm_addr-id-for-the-initial-subflow.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-reset-mpc-endp-id-when-re-added.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-reuse-id-0-after-delete-and-re-add.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-send-ack-on-an-active-subflow.patch [new file with mode: 0644]
queue-6.10/mptcp-pm-skip-connecting-to-already-established-sf.patch [new file with mode: 0644]
queue-6.10/mptcp-pr_debug-add-missing-n-at-the-end.patch [new file with mode: 0644]
queue-6.10/mptcp-sched-check-both-backup-in-retrans.patch [new file with mode: 0644]
queue-6.10/net-mana-fix-race-of-mana_hwc_post_rx_wqe-and-new-hwc-response.patch [new file with mode: 0644]
queue-6.10/selftests-mptcp-join-cannot-rm-sf-if-closed.patch [new file with mode: 0644]
queue-6.10/selftests-mptcp-join-check-re-re-adding-id-0-endp.patch [new file with mode: 0644]
queue-6.10/selftests-mptcp-join-check-removing-id-0-endpoint.patch [new file with mode: 0644]
queue-6.10/selftests-mptcp-join-no-extra-msg-if-no-counter.patch [new file with mode: 0644]
queue-6.10/series
queue-6.10/wifi-mwifiex-duplicate-static-structs-used-in-driver-instances.patch [new file with mode: 0644]
queue-6.10/wifi-wfx-repair-open-network-ap-mode.patch [new file with mode: 0644]

diff --git a/queue-6.10/binfmt_elf_fdpic-fix-auxv-size-calculation-when-elf_hwcap2-is-defined.patch b/queue-6.10/binfmt_elf_fdpic-fix-auxv-size-calculation-when-elf_hwcap2-is-defined.patch
new file mode 100644 (file)
index 0000000..24f2ed3
--- /dev/null
@@ -0,0 +1,43 @@
+From c6a09e342f8e6d3cac7f7c5c14085236aca284b9 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Sun, 25 Aug 2024 20:27:45 -0700
+Subject: binfmt_elf_fdpic: fix AUXV size calculation when ELF_HWCAP2 is defined
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit c6a09e342f8e6d3cac7f7c5c14085236aca284b9 upstream.
+
+create_elf_fdpic_tables() does not correctly account for the space
+needed by the AUX vector when an architecture has ELF_HWCAP2 defined.
+Prior to commit 10e29251be0e ("binfmt_elf_fdpic: fix /proc/<pid>/auxv")
+this resulted in the last entry of the AUX vector being set to zero,
+but with that change it results in a kernel BUG.
+
+Fix that by adding one to the number of AUXV entries (nitems) when
+ELF_HWCAP2 is defined.
+
+Fixes: 10e29251be0e ("binfmt_elf_fdpic: fix /proc/<pid>/auxv")
+Cc: stable@vger.kernel.org
+Reported-by: Greg Ungerer <gerg@kernel.org>
+Closes: https://lore.kernel.org/lkml/5b51975f-6d0b-413c-8b38-39a6a45e8821@westnet.com.au/
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Tested-by: Greg Ungerer <gerg@kernel.org>
+Link: https://lore.kernel.org/r/20240826032745.3423812-1-jcmvbkbc@gmail.com
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/binfmt_elf_fdpic.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -592,6 +592,9 @@ static int create_elf_fdpic_tables(struc
+       if (bprm->have_execfd)
+               nitems++;
++#ifdef ELF_HWCAP2
++      nitems++;
++#endif
+       csp = sp;
+       sp -= nitems * 2 * sizeof(unsigned long);
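
For context: each AUX vector entry is a (type, value) pair of unsigned
longs, which is why the code above reserves nitems * 2 *
sizeof(unsigned long) bytes of stack. When ELF_HWCAP2 is defined, the
kernel emits an extra AT_HWCAP2 pair, so nitems must grow by one to
match. A minimal userspace sketch of the sizing logic (the base entry
count is made up for illustration; this is not the kernel code):

    #include <stdio.h>

    #define ELF_HWCAP2 1          /* pretend the architecture defines it */
    #define BASE_ENTRIES 14       /* illustrative count, incl. AT_NULL */

    int main(void)
    {
        unsigned long nitems = BASE_ENTRIES;
    #ifdef ELF_HWCAP2
        nitems++;                 /* room for the AT_HWCAP2 (type, value) pair */
    #endif
        /* each entry is two unsigned longs: type and value */
        printf("auxv bytes: %zu\n", nitems * 2 * sizeof(unsigned long));
        return 0;
    }
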
diff --git a/queue-6.10/drm-amdgpu-align-pp_power_profile_mode-with-kernel-docs.patch b/queue-6.10/drm-amdgpu-align-pp_power_profile_mode-with-kernel-docs.patch
new file mode 100644 (file)
index 0000000..6949a83
--- /dev/null
@@ -0,0 +1,44 @@
+From 8f614469de248a4bc55fb07e55d5f4c340c75b11 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 21 Aug 2024 14:32:02 -0400
+Subject: drm/amdgpu: align pp_power_profile_mode with kernel docs
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 8f614469de248a4bc55fb07e55d5f4c340c75b11 upstream.
+
+The kernel doc says you need to select manual mode to
+adjust this, but the code only allows you to adjust it when
+manual mode is not selected.  Remove the manual mode check.
+
+Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit bbb05f8a9cd87f5046d05a0c596fddfb714ee457)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -2256,8 +2256,7 @@ static int smu_adjust_power_state_dynami
+               smu_dpm_ctx->dpm_level = level;
+       }
+-      if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+-              smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
++      if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+               index = fls(smu->workload_mask);
+               index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+               workload[0] = smu->workload_setting[index];
+@@ -2334,8 +2333,7 @@ static int smu_switch_power_profile(void
+               workload[0] = smu->workload_setting[index];
+       }
+-      if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+-              smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
++      if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+               smu_bump_power_profile_mode(smu, workload, 0);
+       return 0;
diff --git a/queue-6.10/drm-amdgpu-swsmu-always-force-a-state-reprogram-on-init.patch b/queue-6.10/drm-amdgpu-swsmu-always-force-a-state-reprogram-on-init.patch
new file mode 100644 (file)
index 0000000..480bf92
--- /dev/null
@@ -0,0 +1,71 @@
+From d420c857d85777663e8d16adfc24463f5d5c2dbc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 22 Aug 2024 21:54:24 -0400
+Subject: drm/amdgpu/swsmu: always force a state reprogram on init
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit d420c857d85777663e8d16adfc24463f5d5c2dbc upstream.
+
+Always reprogram the hardware state on init.  This ensures
+the PMFW state is explicitly programmed and we are not relying
+on the default PMFW state.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3131
+Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit c50fe289ed7207f71df3b5f1720512a9620e84fb)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c |   15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -2215,8 +2215,9 @@ static int smu_bump_power_profile_mode(s
+ }
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+-                                 enum amd_dpm_forced_level level,
+-                                 bool skip_display_settings)
++                                        enum amd_dpm_forced_level level,
++                                        bool skip_display_settings,
++                                        bool force_update)
+ {
+       int ret = 0;
+       int index = 0;
+@@ -2245,7 +2246,7 @@ static int smu_adjust_power_state_dynami
+               }
+       }
+-      if (smu_dpm_ctx->dpm_level != level) {
++      if (force_update || smu_dpm_ctx->dpm_level != level) {
+               ret = smu_asic_set_performance_level(smu, level);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Failed to set performance level!");
+@@ -2261,7 +2262,7 @@ static int smu_adjust_power_state_dynami
+               index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+               workload[0] = smu->workload_setting[index];
+-              if (smu->power_profile_mode != workload[0])
++              if (force_update || smu->power_profile_mode != workload[0])
+                       smu_bump_power_profile_mode(smu, workload, 0);
+       }
+@@ -2282,11 +2283,13 @@ static int smu_handle_task(struct smu_co
+               ret = smu_pre_display_config_changed(smu);
+               if (ret)
+                       return ret;
+-              ret = smu_adjust_power_state_dynamic(smu, level, false);
++              ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+               break;
+       case AMD_PP_TASK_COMPLETE_INIT:
++              ret = smu_adjust_power_state_dynamic(smu, level, true, true);
++              break;
+       case AMD_PP_TASK_READJUST_POWER_STATE:
+-              ret = smu_adjust_power_state_dynamic(smu, level, true);
++              ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+               break;
+       default:
+               break;
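
The new force_update flag above is an instance of a common pattern: a
cached-state check skips redundant hardware writes, except on paths
where the cached value cannot be trusted (here, init, where the PMFW
may be running a default state the driver never programmed). A hedged
sketch of the idea, with illustrative names rather than the driver's:

    #include <stdbool.h>

    struct level_cache {
        int level;                /* last value the driver programmed */
    };

    static int set_level(struct level_cache *c, int level, bool force_update)
    {
        if (!force_update && c->level == level)
            return 0;             /* unchanged: skip the expensive reprogram */
        c->level = level;         /* stands in for smu_asic_set_performance_level() */
        return 0;
    }
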
diff --git a/queue-6.10/drm-i915-dp_mst-fix-mst-state-after-a-sink-reset.patch b/queue-6.10/drm-i915-dp_mst-fix-mst-state-after-a-sink-reset.patch
new file mode 100644 (file)
index 0000000..6991dff
--- /dev/null
@@ -0,0 +1,116 @@
+From a2ccc33b88e2953a6bf0b309e7e8849cc5320018 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Fri, 23 Aug 2024 19:29:18 +0300
+Subject: drm/i915/dp_mst: Fix MST state after a sink reset
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit a2ccc33b88e2953a6bf0b309e7e8849cc5320018 upstream.
+
+In some cases the sink can reset itself after it was configured into MST
+mode, without the driver noticing the disconnected state. For instance
+the reset may happen in the middle of a modeset, or the (long) HPD pulse
+generated may not be long enough for the encoder detect handler to
+observe the HPD's deasserted state. In this case the sink's DPCD
+register programmed to enable MST will be reset, while the driver
+still assumes MST is enabled. Detect this condition and tear down and
+recreate/re-enable the MST topology.
+
+v2:
+- Add a code comment about adjusting the expected DP_MSTM_CTRL register
+  value for SST + SideBand. (Suraj, Jani)
+- Print a debug message about detecting the link reset. (Jani)
+- Verify the DPCD MST state only if it wasn't already determined that
+  the sink is disconnected.
+
+Cc: stable@vger.kernel.org
+Cc: Jani Nikula <jani.nikula@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11195
+Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com> (v1)
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240823162918.1211875-1-imre.deak@intel.com
+(cherry picked from commit 594cf78dc36f31c0c7e0de4567e644f406d46bae)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c     |   12 ++++++++
+ drivers/gpu/drm/i915/display/intel_dp_mst.c |   40 ++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/display/intel_dp_mst.h |    1 
+ 3 files changed, 53 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5860,6 +5860,18 @@ intel_dp_detect(struct drm_connector *co
+       else
+               status = connector_status_disconnected;
++      if (status != connector_status_disconnected &&
++          !intel_dp_mst_verify_dpcd_state(intel_dp))
++              /*
++               * This requires retrying detection for instance to re-enable
++               * the MST mode that got reset via a long HPD pulse. The retry
++               * will happen either via the hotplug handler's retry logic,
++               * ensured by setting the connector here to SST/disconnected,
++               * or via a userspace connector probing in response to the
++               * hotplug uevent sent when removing the MST connectors.
++               */
++              status = connector_status_disconnected;
++
+       if (status == connector_status_disconnected) {
+               memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+               memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -1986,3 +1986,43 @@ bool intel_dp_mst_crtc_needs_modeset(str
+       return false;
+ }
++
++/*
++ * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
++ * @intel_dp: DP port object
++ *
++ * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
++ * state. A long HPD pulse - not long enough to be detected as a disconnected
++ * state - could've reset the DPCD state, which requires tearing
++ * down/recreating the MST topology.
++ *
++ * Returns %true if the SW MST enabled and DPCD states match, %false
++ * otherwise.
++ */
++bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
++{
++      struct intel_display *display = to_intel_display(intel_dp);
++      struct intel_connector *connector = intel_dp->attached_connector;
++      struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
++      struct intel_encoder *encoder = &dig_port->base;
++      int ret;
++      u8 val;
++
++      if (!intel_dp->is_mst)
++              return true;
++
++      ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
++
++      /* Adjust the expected register value for SST + SideBand. */
++      if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
++              drm_dbg_kms(display->drm,
++                          "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n",
++                          connector->base.base.id, connector->base.name,
++                          encoder->base.base.id, encoder->base.name,
++                          ret, val);
++
++              return false;
++      }
++
++      return true;
++}
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
+@@ -27,5 +27,6 @@ int intel_dp_mst_atomic_check_link(struc
+                                  struct intel_link_bw_limits *limits);
+ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+                                    struct intel_crtc *crtc);
++bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
+ #endif /* __INTEL_DP_MST_H__ */
diff --git a/queue-6.10/drm-i915-dsi-make-lenovo-yoga-tab-3-x90f-dmi-match-less-strict.patch b/queue-6.10/drm-i915-dsi-make-lenovo-yoga-tab-3-x90f-dmi-match-less-strict.patch
new file mode 100644 (file)
index 0000000..dacead8
--- /dev/null
@@ -0,0 +1,40 @@
+From 7d058e6bac9afab6a406e34344ebbfd3068bb2d5 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Fri, 23 Aug 2024 09:50:55 +0200
+Subject: drm/i915/dsi: Make Lenovo Yoga Tab 3 X90F DMI match less strict
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 7d058e6bac9afab6a406e34344ebbfd3068bb2d5 upstream.
+
+There are 2G and 4G RAM versions of the Lenovo Yoga Tab 3 X90F and it
+turns out that the 2G version has a DMI product name of
+"CHERRYVIEW D1 PLATFORM" whereas the 4G version has
+"CHERRYVIEW C0 PLATFORM". The sys-vendor + product-version checks are
+unique enough that the product-name check is not necessary.
+
+Drop the product-name check so that the existing DMI match for the 4G
+RAM version also matches the 2G RAM version.
+
+Fixes: f6f4a0862bde ("drm/i915/vlv_dsi: Add DMI quirk for backlight control issues on Lenovo Yoga Tab 3 (v2)")
+Cc: stable@vger.kernel.org
+Acked-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240823075055.17198-1-hdegoede@redhat.com
+(cherry picked from commit a4dbe45c4c14edc316ae94b9af86a28f8c5d8123)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/vlv_dsi.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1869,7 +1869,6 @@ static const struct dmi_system_id vlv_ds
+               /* Lenovo Yoga Tab 3 Pro YT3-X90F */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+               },
+               .driver_data = (void *)vlv_dsi_lenovo_yoga_tab3_backlight_fixup,
diff --git a/queue-6.10/drm-v3d-disable-preemption-while-updating-gpu-stats.patch b/queue-6.10/drm-v3d-disable-preemption-while-updating-gpu-stats.patch
new file mode 100644 (file)
index 0000000..b35f1aa
--- /dev/null
@@ -0,0 +1,78 @@
+From 9d824c7fce58f59982228aa85b0376b113cdfa35 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Tue, 13 Aug 2024 11:25:04 +0100
+Subject: drm/v3d: Disable preemption while updating GPU stats
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 9d824c7fce58f59982228aa85b0376b113cdfa35 upstream.
+
+We forgot to disable preemption around the write_seqcount_begin/end() pair
+while updating GPU stats:
+
+  [ ] WARNING: CPU: 2 PID: 12 at include/linux/seqlock.h:221 __seqprop_assert.isra.0+0x128/0x150 [v3d]
+  [ ] Workqueue: v3d_bin drm_sched_run_job_work [gpu_sched]
+ <...snip...>
+  [ ] Call trace:
+  [ ]  __seqprop_assert.isra.0+0x128/0x150 [v3d]
+  [ ]  v3d_job_start_stats.isra.0+0x90/0x218 [v3d]
+  [ ]  v3d_bin_job_run+0x23c/0x388 [v3d]
+  [ ]  drm_sched_run_job_work+0x520/0x6d0 [gpu_sched]
+  [ ]  process_one_work+0x62c/0xb48
+  [ ]  worker_thread+0x468/0x5b0
+  [ ]  kthread+0x1c4/0x1e0
+  [ ]  ret_from_fork+0x10/0x20
+
+Fix it.
+
+Cc: Maíra Canal <mcanal@igalia.com>
+Cc: stable@vger.kernel.org # v6.10+
+Fixes: 6abe93b621ab ("drm/v3d: Fix race-condition between sysfs/fdinfo and interrupt handler")
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Acked-by: Maíra Canal <mcanal@igalia.com>
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240813102505.80512-1-tursulin@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/v3d/v3d_sched.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index b8682818bafa..ad1e6236ff6f 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -134,6 +134,8 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
+       struct v3d_stats *local_stats = &file->stats[queue];
+       u64 now = local_clock();
++      preempt_disable();
++
+       write_seqcount_begin(&local_stats->lock);
+       local_stats->start_ns = now;
+       write_seqcount_end(&local_stats->lock);
+@@ -141,6 +143,8 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
+       write_seqcount_begin(&global_stats->lock);
+       global_stats->start_ns = now;
+       write_seqcount_end(&global_stats->lock);
++
++      preempt_enable();
+ }
+ static void
+@@ -162,8 +166,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+       struct v3d_stats *local_stats = &file->stats[queue];
+       u64 now = local_clock();
++      preempt_disable();
+       v3d_stats_update(local_stats, now);
+       v3d_stats_update(global_stats, now);
++      preempt_enable();
+ }
+ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
+-- 
+2.46.0
+
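
The warning fixed above comes from the seqcount rules: a seqcount_t
write section must run with preemption disabled, because a reader that
samples an odd sequence count will spin until it becomes even again,
and a reader preempting the writer on the same CPU would therefore
spin forever. A kernel-style sketch of the corrected pattern (the
stats structure is illustrative, not the v3d one):

    #include <linux/preempt.h>
    #include <linux/seqlock.h>
    #include <linux/types.h>

    struct stats_sketch {
        seqcount_t lock;
        u64 start_ns;
    };

    static void stats_start(struct stats_sketch *s, u64 now)
    {
        preempt_disable();        /* no reader may preempt the write section */
        write_seqcount_begin(&s->lock);
        s->start_ns = now;
        write_seqcount_end(&s->lock);
        preempt_enable();
    }
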
diff --git a/queue-6.10/drm-vmwgfx-disable-coherent-dumb-buffers-without-3d.patch b/queue-6.10/drm-vmwgfx-disable-coherent-dumb-buffers-without-3d.patch
new file mode 100644 (file)
index 0000000..34bcf98
--- /dev/null
@@ -0,0 +1,53 @@
+From e9fd436bb8fb9b9d31fdf07bbcdba6d30290c5e4 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Fri, 16 Aug 2024 14:32:07 -0400
+Subject: drm/vmwgfx: Disable coherent dumb buffers without 3d
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit e9fd436bb8fb9b9d31fdf07bbcdba6d30290c5e4 upstream.
+
+Coherent surfaces only make sense if the host renders to them using
+accelerated APIs. Without 3d the entire content of dumb buffers stays
+in the guest, making all of the extra work done to synchronize
+between guest and host useless.
+
+Configurations without 3d also tend to run with very low graphics
+memory limits. The pinned console fb, mob cursors and graphical login
+manager tend to exhaust the 16MB of graphics memory those guests use.
+
+Fix it by making sure the coherent dumb buffers are only used on
+configs with 3d enabled.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: d6667f0ddf46 ("drm/vmwgfx: Fix handling of dumb buffers")
+Reported-by: Christian Heusel <christian@heusel.eu>
+Closes: https://lore.kernel.org/all/0d0330f3-2ac0-4cd5-8075-7f1cbaf72a8e@heusel.eu
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.9+
+Link: https://patchwork.freedesktop.org/patch/msgid/20240816183332.31961-4-zack.rusin@broadcom.com
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Tested-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -2283,9 +2283,11 @@ int vmw_dumb_create(struct drm_file *fil
+       /*
+        * Without mob support we're just going to use raw memory buffer
+        * because we wouldn't be able to support full surface coherency
+-       * without mobs
++       * without mobs. There also no reason to support surface coherency
++       * without 3d (i.e. gpu usage on the host) because then all the
++       * contents is going to be rendered guest side.
+        */
+-      if (!dev_priv->has_mob) {
++      if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
+               int cpp = DIV_ROUND_UP(args->bpp, 8);
+               switch (cpp) {
diff --git a/queue-6.10/drm-vmwgfx-fix-prime-with-external-buffers.patch b/queue-6.10/drm-vmwgfx-fix-prime-with-external-buffers.patch
new file mode 100644 (file)
index 0000000..5a48e54
--- /dev/null
@@ -0,0 +1,259 @@
+From 50f1199250912568606b3778dc56646c10cb7b04 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Fri, 16 Aug 2024 14:32:06 -0400
+Subject: drm/vmwgfx: Fix prime with external buffers
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit 50f1199250912568606b3778dc56646c10cb7b04 upstream.
+
+Make sure that for external buffers mapping goes through the dma_buf
+interface instead of trying to access pages directly.
+
+External buffers might not provide direct access to readable/writable
+pages, so to make sure the bo's created from external dma_bufs can be
+read, the dma_buf interface has to be used.
+
+Fixes crashes in IGT's kms_prime with vgem. Regular desktop usage won't
+trigger this because virtual machines will not have multiple GPUs,
+but it enables better test coverage in IGT.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: b32233acceff ("drm/vmwgfx: Fix prime import/export")
+Cc: <stable@vger.kernel.org> # v6.6+
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.9+
+Link: https://patchwork.freedesktop.org/patch/msgid/20240816183332.31961-3-zack.rusin@broadcom.com
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_blit.c |  114 +++++++++++++++++++++++++++++++++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  |    4 -
+ drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c |   12 +--
+ 3 files changed, 118 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+@@ -27,6 +27,8 @@
+  **************************************************************************/
+ #include "vmwgfx_drv.h"
++
++#include "vmwgfx_bo.h"
+ #include <linux/highmem.h>
+ /*
+@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct v
+       return 0;
+ }
++static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++      struct vmw_private *vmw =
++              container_of(bo->tbo.bdev, struct vmw_private, bdev);
++      void *ptr = NULL;
++      int ret;
++
++      if (bo->tbo.base.import_attach) {
++              ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
++              if (ret) {
++                      drm_dbg_driver(&vmw->drm,
++                                     "Wasn't able to map external bo!\n");
++                      goto out;
++              }
++              ptr = map->vaddr;
++      } else {
++              ptr = vmw_bo_map_and_cache(bo);
++      }
++
++out:
++      return ptr;
++}
++
++static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++      if (bo->tbo.base.import_attach)
++              dma_buf_vunmap(bo->tbo.base.dma_buf, map);
++      else
++              vmw_bo_unmap(bo);
++}
++
++static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
++                              u32 dst_stride, struct vmw_bo *src,
++                              u32 src_offset, u32 src_stride,
++                              u32 width_in_bytes, u32 height,
++                              struct vmw_diff_cpy *diff)
++{
++      struct vmw_private *vmw =
++              container_of(dst->tbo.bdev, struct vmw_private, bdev);
++      size_t dst_size = dst->tbo.resource->size;
++      size_t src_size = src->tbo.resource->size;
++      struct iosys_map dst_map = {0};
++      struct iosys_map src_map = {0};
++      int ret, i;
++      int x_in_bytes;
++      u8 *vsrc;
++      u8 *vdst;
++
++      vsrc = map_external(src, &src_map);
++      if (!vsrc) {
++              drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      vdst = map_external(dst, &dst_map);
++      if (!vdst) {
++              drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      vsrc += src_offset;
++      vdst += dst_offset;
++      if (src_stride == dst_stride) {
++              dst_size -= dst_offset;
++              src_size -= src_offset;
++              memcpy(vdst, vsrc,
++                     min(dst_stride * height, min(dst_size, src_size)));
++      } else {
++              WARN_ON(dst_stride < width_in_bytes);
++              for (i = 0; i < height; ++i) {
++                      memcpy(vdst, vsrc, width_in_bytes);
++                      vsrc += src_stride;
++                      vdst += dst_stride;
++              }
++      }
++
++      x_in_bytes = (dst_offset % dst_stride);
++      diff->rect.x1 =  x_in_bytes / diff->cpp;
++      diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
++      diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
++      diff->rect.y2 = diff->rect.y1 + height;
++
++      ret = 0;
++out:
++      unmap_external(src, &src_map);
++      unmap_external(dst, &dst_map);
++
++      return ret;
++}
++
+ /**
+  * vmw_bo_cpu_blit - in-kernel cpu blit.
+  *
+- * @dst: Destination buffer object.
++ * @vmw_dst: Destination buffer object.
+  * @dst_offset: Destination offset of blit start in bytes.
+  * @dst_stride: Destination stride in bytes.
+- * @src: Source buffer object.
++ * @vmw_src: Source buffer object.
+  * @src_offset: Source offset of blit start in bytes.
+  * @src_stride: Source stride in bytes.
+  * @w: Width of blit.
+@@ -444,13 +538,15 @@ static int vmw_bo_cpu_blit_line(struct v
+  * Neither of the buffer objects may be placed in PCI memory
+  * (Fixed memory in TTM terminology) when using this function.
+  */
+-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
++int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
+                   u32 dst_offset, u32 dst_stride,
+-                  struct ttm_buffer_object *src,
++                  struct vmw_bo *vmw_src,
+                   u32 src_offset, u32 src_stride,
+                   u32 w, u32 h,
+                   struct vmw_diff_cpy *diff)
+ {
++      struct ttm_buffer_object *src = &vmw_src->tbo;
++      struct ttm_buffer_object *dst = &vmw_dst->tbo;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+@@ -460,6 +556,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+       int ret = 0;
+       struct page **dst_pages = NULL;
+       struct page **src_pages = NULL;
++      bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++      bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++
++      if (WARN_ON(dst == src))
++              return -EINVAL;
+       /* Buffer objects need to be either pinned or reserved: */
+       if (!(dst->pin_count))
+@@ -479,6 +580,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+                       return ret;
+       }
++      if (src_external || dst_external)
++              return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
++                                          vmw_src, src_offset, src_stride,
++                                          w, h, diff);
++
+       if (!src->ttm->pages && src->ttm->sg) {
+               src_pages = kvmalloc_array(src->ttm->num_pages,
+                                          sizeof(struct page *), GFP_KERNEL);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1353,9 +1353,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy
+ void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
+-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
++int vmw_bo_cpu_blit(struct vmw_bo *dst,
+                   u32 dst_offset, u32 dst_stride,
+-                  struct ttm_buffer_object *src,
++                  struct vmw_bo *src,
+                   u32 src_offset, u32 src_stride,
+                   u32 w, u32 h,
+                   struct vmw_diff_cpy *diff);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -502,7 +502,7 @@ static void vmw_stdu_bo_cpu_commit(struc
+               container_of(dirty->unit, typeof(*stdu), base);
+       s32 width, height;
+       s32 src_pitch, dst_pitch;
+-      struct ttm_buffer_object *src_bo, *dst_bo;
++      struct vmw_bo *src_bo, *dst_bo;
+       u32 src_offset, dst_offset;
+       struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
+@@ -517,11 +517,11 @@ static void vmw_stdu_bo_cpu_commit(struc
+       /* Assume we are blitting from Guest (bo) to Host (display_srf) */
+       src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+-      src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
++      src_bo = stdu->display_srf->res.guest_memory_bo;
+       src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
+       dst_pitch = ddirty->pitch;
+-      dst_bo = &ddirty->buf->tbo;
++      dst_bo = ddirty->buf;
+       dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
+       (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
+@@ -1170,7 +1170,7 @@ vmw_stdu_bo_populate_update_cpu(struct v
+       struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+       struct vmw_stdu_update_gb_image *cmd_img = cmd;
+       struct vmw_stdu_update *cmd_update;
+-      struct ttm_buffer_object *src_bo, *dst_bo;
++      struct vmw_bo *src_bo, *dst_bo;
+       u32 src_offset, dst_offset;
+       s32 src_pitch, dst_pitch;
+       s32 width, height;
+@@ -1184,11 +1184,11 @@ vmw_stdu_bo_populate_update_cpu(struct v
+       diff.cpp = stdu->cpp;
+-      dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
++      dst_bo = stdu->display_srf->res.guest_memory_bo;
+       dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+       dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+-      src_bo = &vfbbo->buffer->tbo;
++      src_bo = vfbbo->buffer;
+       src_pitch = update->vfb->base.pitches[0];
+       src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+               stdu->cpp;
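
The map_external() helper above encodes the general dma-buf rule: a
buffer imported from another device exposes no pages the importer may
touch directly, so CPU access has to go through dma_buf_vmap() and
dma_buf_vunmap(). A hedged sketch of that branch for a generic GEM
object (illustrative only, not the vmwgfx code):

    #include <linux/dma-buf.h>
    #include <drm/drm_gem.h>

    static void *map_for_cpu(struct drm_gem_object *obj, struct iosys_map *map)
    {
        if (obj->import_attach) {
            /* imported: ask the exporter for a CPU mapping */
            if (dma_buf_vmap(obj->dma_buf, map))
                return NULL;      /* exporter refused or cannot vmap */
            return map->vaddr;
        }
        /* native bo: fall back to the driver's own kmap path */
        return NULL;
    }
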
diff --git a/queue-6.10/drm-vmwgfx-prevent-unmapping-active-read-buffers.patch b/queue-6.10/drm-vmwgfx-prevent-unmapping-active-read-buffers.patch
new file mode 100644 (file)
index 0000000..bbb7966
--- /dev/null
@@ -0,0 +1,95 @@
+From aba07b9a0587f50e5d3346eaa19019cf3f86c0ea Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Fri, 16 Aug 2024 14:32:05 -0400
+Subject: drm/vmwgfx: Prevent unmapping active read buffers
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit aba07b9a0587f50e5d3346eaa19019cf3f86c0ea upstream.
+
+The kms paths keep a persistent map active to read and compare the cursor
+buffer. These maps can race with each other in a simple scenario where:
+a) buffer "a" mapped for update
+b) buffer "a" mapped for compare
+c) do the compare
+d) unmap "a" for compare
+e) update the cursor
+f) unmap "a" for update
+At step "e" the buffer has been unmapped and the read contents are bogus.
+
+Prevent unmapping of active read buffers by simply keeping a count of
+how many paths have currently active maps and unmap only when the count
+reaches 0.
+
+Fixes: 485d98d472d5 ("drm/vmwgfx: Add support for CursorMob and CursorBypass 4")
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v5.19+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240816183332.31961-2-zack.rusin@broadcom.com
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |   13 +++++++++++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.h |    3 +++
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -360,6 +360,8 @@ void *vmw_bo_map_and_cache_size(struct v
+       void *virtual;
+       int ret;
++      atomic_inc(&vbo->map_count);
++
+       virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+       if (virtual)
+               return virtual;
+@@ -383,11 +385,17 @@ void *vmw_bo_map_and_cache_size(struct v
+  */
+ void vmw_bo_unmap(struct vmw_bo *vbo)
+ {
++      int map_count;
++
+       if (vbo->map.bo == NULL)
+               return;
+-      ttm_bo_kunmap(&vbo->map);
+-      vbo->map.bo = NULL;
++      map_count = atomic_dec_return(&vbo->map_count);
++
++      if (!map_count) {
++              ttm_bo_kunmap(&vbo->map);
++              vbo->map.bo = NULL;
++      }
+ }
+@@ -421,6 +429,7 @@ static int vmw_bo_init(struct vmw_privat
+       vmw_bo->tbo.priority = 3;
+       vmw_bo->res_tree = RB_ROOT;
+       xa_init(&vmw_bo->detached_resources);
++      atomic_set(&vmw_bo->map_count, 0);
+       params->size = ALIGN(params->size, PAGE_SIZE);
+       drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -71,6 +71,8 @@ struct vmw_bo_params {
+  * @map: Kmap object for semi-persistent mappings
+  * @res_tree: RB tree of resources using this buffer object as a backing MOB
+  * @res_prios: Eviction priority counts for attached resources
++ * @map_count: The number of currently active maps. Will differ from the
++ * cpu_writers because it includes kernel maps.
+  * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+  * increased. May be decreased without reservation.
+  * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+@@ -90,6 +92,7 @@ struct vmw_bo {
+       u32 res_prios[TTM_MAX_BO_PRIORITY];
+       struct xarray detached_resources;
++      atomic_t map_count;
+       atomic_t cpu_writers;
+       /* Not ref-counted.  Protected by binding_mutex */
+       struct vmw_resource *dx_query_ctx;
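
The map_count added above turns the previously single-owner cursor map
into a reference-counted one: every mapper takes a count, and
ttm_bo_kunmap() only runs when the last user drops out, so the compare
path can no longer pull the mapping out from under a concurrent update
path. A minimal sketch of the counting scheme (types illustrative):

    #include <linux/atomic.h>

    struct cached_map {
        atomic_t map_count;
        void *vaddr;              /* cached kernel mapping, if any */
    };

    static void *map_get(struct cached_map *m)
    {
        atomic_inc(&m->map_count);
        return m->vaddr;          /* real code creates the kmap on first use */
    }

    static void map_put(struct cached_map *m)
    {
        /* tear down only when the last active map is dropped */
        if (atomic_dec_return(&m->map_count) == 0)
            m->vaddr = NULL;      /* stands in for ttm_bo_kunmap() */
    }
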
diff --git a/queue-6.10/mptcp-avoid-duplicated-sub_closed-events.patch b/queue-6.10/mptcp-avoid-duplicated-sub_closed-events.patch
new file mode 100644 (file)
index 0000000..60fe157
--- /dev/null
@@ -0,0 +1,81 @@
+From d82809b6c5f2676b382f77a5cbeb1a5d91ed2235 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:35 +0200
+Subject: mptcp: avoid duplicated SUB_CLOSED events
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit d82809b6c5f2676b382f77a5cbeb1a5d91ed2235 upstream.
+
+The initial subflow might have already been closed, but still in the
+connection list. When the worker is instructed to close the subflows
+that have been marked as closed, it might then try to close the initial
+subflow again.
+
+ A consequence of that is that the SUB_CLOSED event can be seen twice:
+
+  # ip mptcp endpoint
+  1.1.1.1 id 1 subflow dev eth0
+  2.2.2.2 id 2 subflow dev eth1
+
+  # ip mptcp monitor &
+  [         CREATED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+  [     ESTABLISHED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+  [  SF_ESTABLISHED] remid=0 locid=2 saddr4=2.2.2.2 daddr4=9.9.9.9
+
+  # ip mptcp endpoint delete id 1
+  [       SF_CLOSED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+  [       SF_CLOSED] remid=0 locid=0 saddr4=1.1.1.1 daddr4=9.9.9.9
+
+The first one is coming from mptcp_pm_nl_rm_subflow_received(), and the
+second one from __mptcp_close_subflow().
+
+To avoid doing the post-closed processing twice, the subflow is now
+marked as closed the first time.
+
+Note that it is not enough to check if we are dealing with the first
+subflow and check its sk_state: the subflow might have been reset or
+closed before calling mptcp_close_ssk().
+
+Fixes: b911c97c7dc7 ("mptcp: add netlink event support")
+Cc: stable@vger.kernel.org
+Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    6 ++++++
+ net/mptcp/protocol.h |    3 ++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2508,6 +2508,12 @@ out:
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                    struct mptcp_subflow_context *subflow)
+ {
++      /* The first subflow can already be closed and still in the list */
++      if (subflow->close_event_done)
++              return;
++
++      subflow->close_event_done = true;
++
+       if (sk->sk_state == TCP_ESTABLISHED)
+               mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -519,7 +519,8 @@ struct mptcp_subflow_context {
+               stale : 1,          /* unable to snd/rcv data, do not use for xmit */
+               valid_csum_seen : 1,        /* at least one csum validated */
+               is_mptfo : 1,       /* subflow is doing TFO */
+-              __unused : 10;
++              close_event_done : 1,       /* has done the post-closed part */
++              __unused : 9;
+       bool    data_avail;
+       bool    scheduled;
+       u32     remote_nonce;
diff --git a/queue-6.10/mptcp-close-subflow-when-receiving-tcp-fin.patch b/queue-6.10/mptcp-close-subflow-when-receiving-tcp-fin.patch
new file mode 100644 (file)
index 0000000..70e2e08
--- /dev/null
@@ -0,0 +1,81 @@
+From f09b0ad55a1196f5891663f8888463c0541059cb Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 26 Aug 2024 19:11:18 +0200
+Subject: mptcp: close subflow when receiving TCP+FIN
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit f09b0ad55a1196f5891663f8888463c0541059cb upstream.
+
+When a peer decides to close one subflow in the middle of a connection
+having multiple subflows, the receiver of the first FIN should accept
+that, and close the subflow on its side as well. If not, the subflow
+will stay half closed, and could even continue to be used until the end
+of the MPTCP connection or a reset from the network.
+
+The issue has not been seen before, probably because the in-kernel
+path-manager always sends a RM_ADDR before closing the subflow. Upon the
+reception of this RM_ADDR, the other peer will initiate the closure on
+its side as well. On the other hand, if the RM_ADDR is lost, or if the
+path-manager of the other peer only closes the subflow without sending a
+RM_ADDR, the subflow would switch to TCP_CLOSE_WAIT, but that's it,
+leaving the subflow half-closed.
+
+So now, when the subflow switches to the TCP_CLOSE_WAIT state, and if
+the MPTCP connection has not been closed before with a DATA_FIN, the
+kernel owning the subflow schedules its worker to initiate the closure
+on its side as well.
+
+This issue can be easily reproduced with packetdrill, as visible in [1],
+by creating an additional subflow, injecting a FIN+ACK before sending
+the DATA_FIN, and expecting a FIN+ACK in return.
+
+Fixes: 40947e13997a ("mptcp: schedule worker when subflow is closed")
+Cc: stable@vger.kernel.org
+Link: https://github.com/multipath-tcp/packetdrill/pull/154 [1]
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-1-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    5 ++++-
+ net/mptcp/subflow.c  |    8 ++++++--
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2533,8 +2533,11 @@ static void __mptcp_close_subflow(struct
+       mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++              int ssk_state = inet_sk_state_load(ssk);
+-              if (inet_sk_state_load(ssk) != TCP_CLOSE)
++              if (ssk_state != TCP_CLOSE &&
++                  (ssk_state != TCP_CLOSE_WAIT ||
++                   inet_sk_state_load(sk) != TCP_ESTABLISHED))
+                       continue;
+               /* 'subflow_data_ready' will re-sched once rx queue is empty */
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1255,12 +1255,16 @@ out:
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+-      if (likely(ssk->sk_state != TCP_CLOSE))
++      struct sock *sk = (struct sock *)msk;
++
++      if (likely(ssk->sk_state != TCP_CLOSE &&
++                 (ssk->sk_state != TCP_CLOSE_WAIT ||
++                  inet_sk_state_load(sk) != TCP_ESTABLISHED)))
+               return;
+       if (skb_queue_empty(&ssk->sk_receive_queue) &&
+           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+-              mptcp_schedule_work((struct sock *)msk);
++              mptcp_schedule_work(sk);
+ }
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
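
The condition added above boils down to a small predicate: schedule
the worker to close a subflow either when it is fully closed, or when
it has received a FIN (TCP_CLOSE_WAIT) while the MPTCP connection as a
whole is still established, meaning no DATA_FIN has been exchanged. A
distilled, illustrative version of that check (not the kernel
function):

    #include <stdbool.h>

    /* simplified stand-ins for the relevant TCP states */
    enum state { ESTABLISHED, CLOSE_WAIT, CLOSE };

    static bool should_close_subflow(enum state ssk_state, enum state msk_state)
    {
        if (ssk_state == CLOSE)
            return true;          /* fully closed: always clean it up */
        /* peer sent a FIN on the subflow, connection itself lives on */
        return ssk_state == CLOSE_WAIT && msk_state == ESTABLISHED;
    }
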
diff --git a/queue-6.10/mptcp-pm-add_addr-0-is-not-a-new-address.patch b/queue-6.10/mptcp-pm-add_addr-0-is-not-a-new-address.patch
new file mode 100644 (file)
index 0000000..4b77773
--- /dev/null
@@ -0,0 +1,80 @@
+From 57f86203b41c98b322119dfdbb1ec54ce5e3369b Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:37 +0200
+Subject: mptcp: pm: ADD_ADDR 0 is not a new address
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 57f86203b41c98b322119dfdbb1ec54ce5e3369b upstream.
+
+The ADD_ADDR 0 with the address from the initial subflow should not be
+considered as a new address: this is not something new. If the host
+receives it, it simply means that the address is available again.
+
+When receiving an ADD_ADDR for the ID 0, the PM already doesn't count
+it as new, by not incrementing the 'add_addr_accepted' counter. But
+'accept_addr' might not be set if the limit has already been reached:
+that check can be bypassed in this case, because ID 0 is not a new
+address. But first, it is important to verify that this ADD_ADDR for
+the ID 0 carries the same address as the initial subflow. If not, it
+is not something that should happen, and the ADD_ADDR can be ignored.
+
+Note that if an ADD_ADDR is received while there is already a subflow
+opened using the same address, this ADD_ADDR is ignored as well. It
+means that if multiple ADD_ADDR for ID 0 are received, there will not be
+any duplicated subflows created by the client.
+
+Fixes: d0876b2284cf ("mptcp: add the incoming RM_ADDR support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c         |    4 +++-
+ net/mptcp/pm_netlink.c |    9 +++++++++
+ net/mptcp/protocol.h   |    2 ++
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -226,7 +226,9 @@ void mptcp_pm_add_addr_received(const st
+               } else {
+                       __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+               }
+-      } else if (!READ_ONCE(pm->accept_addr)) {
++      /* id0 should not have a different address */
++      } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
++                 (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+               mptcp_pm_announce_addr(msk, addr, true);
+               mptcp_pm_add_addr_send_ack(msk);
+       } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -760,6 +760,15 @@ static void mptcp_pm_nl_add_addr_receive
+       }
+ }
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++                                   const struct mptcp_addr_info *remote)
++{
++      struct mptcp_addr_info mpc_remote;
++
++      remote_address((struct sock_common *)msk, &mpc_remote);
++      return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
++}
++
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ {
+       struct mptcp_subflow_context *subflow;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -988,6 +988,8 @@ void mptcp_pm_add_addr_received(const st
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+                             const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++                                   const struct mptcp_addr_info *remote);
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+                              const struct mptcp_rm_list *rm_list);
diff --git a/queue-6.10/mptcp-pm-do-not-remove-already-closed-subflows.patch b/queue-6.10/mptcp-pm-do-not-remove-already-closed-subflows.patch
new file mode 100644 (file)
index 0000000..1539f1a
--- /dev/null
@@ -0,0 +1,34 @@
+From 58e1b66b4e4b8a602d3f2843e8eba00a969ecce2 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:32 +0200
+Subject: mptcp: pm: do not remove already closed subflows
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 58e1b66b4e4b8a602d3f2843e8eba00a969ecce2 upstream.
+
+It is possible to have already closed subflows in the list, e.g. the
+initial subflow has already been closed but is still in the list. No
+need to try to close it again, nor to increment the related counters
+again.
+
+Fixes: 0ee4261a3681 ("mptcp: implement mptcp_pm_remove_subflow")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -838,6 +838,8 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+                       u8 id = subflow_get_local_id(subflow);
++                      if (inet_sk_state_load(ssk) == TCP_CLOSE)
++                              continue;
+                       if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+                               continue;
+                       if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
diff --git a/queue-6.10/mptcp-pm-fix-id-0-endp-usage-after-multiple-re-creations.patch b/queue-6.10/mptcp-pm-fix-id-0-endp-usage-after-multiple-re-creations.patch
new file mode 100644 (file)
index 0000000..c810f0d
--- /dev/null
@@ -0,0 +1,66 @@
+From 9366922adc6a71378ca01f898c41be295309f044 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:33 +0200
+Subject: mptcp: pm: fix ID 0 endp usage after multiple re-creations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 9366922adc6a71378ca01f898c41be295309f044 upstream.
+
+'local_addr_used' and 'add_addr_accepted' are decremented for addresses
+not related to the initial subflow (ID0), because the source and
+destination addresses of the initial subflows are known from the
+beginning: they don't count as "additional local address being used" or
+"ADD_ADDR being accepted".
+
+It is then required not to increment them when the endpoint used by
+the initial subflow is removed and re-added during a connection. Without
+this modification, this entrypoint cannot be removed and re-added more
+than once.
+
+Reported-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/512
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Reported-by: syzbot+455d38ecd5f655fc45cf@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/00000000000049861306209237f4@google.com
+Cc: stable@vger.kernel.org
+Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -615,12 +615,13 @@ subflow:
+               fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+-              msk->pm.local_addr_used++;
+               __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+               /* Special case for ID0: set the correct ID */
+               if (local.addr.id == msk->mpc_endpoint_id)
+                       local.addr.id = 0;
++              else /* local_addr_used is not decr for ID 0 */
++                      msk->pm.local_addr_used++;
+               nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
+               if (nr == 0)
+@@ -750,7 +751,9 @@ static void mptcp_pm_nl_add_addr_receive
+       spin_lock_bh(&msk->pm.lock);
+       if (sf_created) {
+-              msk->pm.add_addr_accepted++;
++              /* add_addr_accepted is not decr for ID 0 */
++              if (remote.id)
++                      msk->pm.add_addr_accepted++;
+               if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+                   msk->pm.subflows >= subflows_max)
+                       WRITE_ONCE(msk->pm.accept_addr, false);
diff --git a/queue-6.10/mptcp-pm-fix-rm_addr-id-for-the-initial-subflow.patch b/queue-6.10/mptcp-pm-fix-rm_addr-id-for-the-initial-subflow.patch
new file mode 100644 (file)
index 0000000..c953dff
--- /dev/null
@@ -0,0 +1,138 @@
+From 87b5896f3f7848130095656739b05881904e2697 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:25 +0200
+Subject: mptcp: pm: fix RM_ADDR ID for the initial subflow
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 87b5896f3f7848130095656739b05881904e2697 upstream.
+
+The initial subflow has a special local ID: 0. When an endpoint is being
+deleted, it is then important to check whether its address is linked
+to the initial subflow, in order to send the right ID.
+
+If there was an endpoint linked to the initial subflow, msk's
+mpc_endpoint_id field will be set. We can then use this info when an
+endpoint is being removed to see if it is linked to the initial subflow.
+
+So now, the correct IDs are passed to mptcp_pm_nl_rm_addr_or_subflow(),
+and mptcp_local_id_match() is no longer needed.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |   28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -806,11 +806,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct
+       return -EINVAL;
+ }
+-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
+-{
+-      return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
+-}
+-
+ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+                                          const struct mptcp_rm_list *rm_list,
+                                          enum linux_mptcp_mib_field rm_type)
+@@ -845,7 +840,7 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+                               continue;
+-                      if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
++                      if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
+                               continue;
+                       pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
+@@ -1461,6 +1456,12 @@ static bool remove_anno_list_by_saddr(st
+       return false;
+ }
++static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
++                                const struct mptcp_addr_info *addr)
++{
++      return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
++}
++
+ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+                                     const struct mptcp_addr_info *addr,
+                                     bool force)
+@@ -1468,7 +1469,7 @@ static bool mptcp_pm_remove_anno_addr(st
+       struct mptcp_rm_list list = { .nr = 0 };
+       bool ret;
+-      list.ids[list.nr++] = addr->id;
++      list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+       ret = remove_anno_list_by_saddr(msk, addr);
+       if (ret || force) {
+@@ -1495,14 +1496,12 @@ static int mptcp_nl_remove_subflow_and_s
+                                                  const struct mptcp_pm_addr_entry *entry)
+ {
+       const struct mptcp_addr_info *addr = &entry->addr;
+-      struct mptcp_rm_list list = { .nr = 0 };
++      struct mptcp_rm_list list = { .nr = 1 };
+       long s_slot = 0, s_num = 0;
+       struct mptcp_sock *msk;
+       pr_debug("remove_id=%d\n", addr->id);
+-      list.ids[list.nr++] = addr->id;
+-
+       while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+               struct sock *sk = (struct sock *)msk;
+               bool remove_subflow;
+@@ -1520,6 +1519,7 @@ static int mptcp_nl_remove_subflow_and_s
+               mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+                                         !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
++              list.ids[0] = mptcp_endp_get_local_id(msk, addr);
+               if (remove_subflow) {
+                       spin_lock_bh(&msk->pm.lock);
+                       mptcp_pm_nl_rm_subflow_received(msk, &list);
+@@ -1628,6 +1628,7 @@ int mptcp_pm_nl_del_addr_doit(struct sk_
+       return ret;
+ }
++/* Called from the userspace PM only */
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ {
+       struct mptcp_rm_list alist = { .nr = 0 };
+@@ -1656,6 +1657,7 @@ void mptcp_pm_remove_addrs(struct mptcp_
+       }
+ }
++/* Called from the in-kernel PM only */
+ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+                                              struct list_head *rm_list)
+ {
+@@ -1665,11 +1667,11 @@ static void mptcp_pm_remove_addrs_and_su
+       list_for_each_entry(entry, rm_list, list) {
+               if (slist.nr < MPTCP_RM_IDS_MAX &&
+                   lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+-                      slist.ids[slist.nr++] = entry->addr.id;
++                      slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+               if (alist.nr < MPTCP_RM_IDS_MAX &&
+                   remove_anno_list_by_saddr(msk, &entry->addr))
+-                      alist.ids[alist.nr++] = entry->addr.id;
++                      alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+       }
+       spin_lock_bh(&msk->pm.lock);
+@@ -1966,7 +1968,7 @@ static void mptcp_pm_nl_fullmesh(struct
+ {
+       struct mptcp_rm_list list = { .nr = 0 };
+-      list.ids[list.nr++] = addr->id;
++      list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+       spin_lock_bh(&msk->pm.lock);
+       mptcp_pm_nl_rm_subflow_received(msk, &list);
diff --git a/queue-6.10/mptcp-pm-reset-mpc-endp-id-when-re-added.patch b/queue-6.10/mptcp-pm-reset-mpc-endp-id-when-re-added.patch
new file mode 100644 (file)
index 0000000..85a44fc
--- /dev/null
@@ -0,0 +1,82 @@
+From dce1c6d1e92535f165219695a826caedcca4e9b9 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:29 +0200
+Subject: mptcp: pm: reset MPC endp ID when re-added
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit dce1c6d1e92535f165219695a826caedcca4e9b9 upstream.
+
+The initial subflow has a special local ID: 0. It is specific per
+connection.
+
+When a global endpoint is deleted and re-added later, it can have a
+different ID -- most services managing the endpoints automatically don't
+force the ID to be the same as before. It is then important to track
+these modifications, to stay consistent with the ID used for the
+initial subflow's address, and to avoid confusing the other peer or
+sending ID 0 for the wrong address.
+
+Now, when removing an endpoint, msk->mpc_endpoint_id is reset if it
+corresponds to this endpoint. When adding a new endpoint, the same
+variable is updated if the address matches that of the initial subflow.
+
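+A hedged, self-contained sketch of the two bookkeeping updates (plain C
+with illustrative names, not the kernel's actual data structures):
+
+  #include <stdio.h>
+
+  static unsigned char mpc_endpoint_id; /* 0 means "unset" */
+
+  static void endpoint_added(unsigned char id, int backs_initial_subflow)
+  {
+          if (backs_initial_subflow)
+                  mpc_endpoint_id = id; /* track the (possibly new) ID */
+  }
+
+  static void endpoint_deleted(unsigned char id)
+  {
+          if (mpc_endpoint_id == id)
+                  mpc_endpoint_id = 0; /* forget the stale mapping */
+  }
+
+  int main(void)
+  {
+          endpoint_added(1, 1);  /* endpoint backing the initial subflow */
+          endpoint_deleted(1);   /* removed: the mapping is reset */
+          endpoint_added(5, 1);  /* re-added later with a different ID */
+          printf("%u\n", mpc_endpoint_id); /* 5 */
+          return 0;
+  }
+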
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |   13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1320,20 +1320,27 @@ static struct pm_nl_pernet *genl_info_pm
+       return pm_nl_get_pernet(genl_info_net(info));
+ }
+-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
++static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
++                                             struct mptcp_addr_info *addr)
+ {
+       struct mptcp_sock *msk;
+       long s_slot = 0, s_num = 0;
+       while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+               struct sock *sk = (struct sock *)msk;
++              struct mptcp_addr_info mpc_addr;
+               if (!READ_ONCE(msk->fully_established) ||
+                   mptcp_pm_is_userspace(msk))
+                       goto next;
++              /* if the endp linked to the init sf is re-added with a != ID */
++              mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+               lock_sock(sk);
+               spin_lock_bh(&msk->pm.lock);
++              if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
++                      msk->mpc_endpoint_id = addr->id;
+               mptcp_pm_create_subflow_or_signal_addr(msk);
+               spin_unlock_bh(&msk->pm.lock);
+               release_sock(sk);
+@@ -1406,7 +1413,7 @@ int mptcp_pm_nl_add_addr_doit(struct sk_
+               goto out_free;
+       }
+-      mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
++      mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
+       return 0;
+ out_free:
+@@ -1522,6 +1529,8 @@ static int mptcp_nl_remove_subflow_and_s
+                       spin_unlock_bh(&msk->pm.lock);
+               }
++              if (msk->mpc_endpoint_id == entry->addr.id)
++                      msk->mpc_endpoint_id = 0;
+               release_sock(sk);
+ next:
diff --git a/queue-6.10/mptcp-pm-reuse-id-0-after-delete-and-re-add.patch b/queue-6.10/mptcp-pm-reuse-id-0-after-delete-and-re-add.patch
new file mode 100644 (file)
index 0000000..2ddf43e
--- /dev/null
@@ -0,0 +1,52 @@
+From 8b8ed1b429f8fa7ebd5632555e7b047bc0620075 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:24 +0200
+Subject: mptcp: pm: reuse ID 0 after delete and re-add
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 8b8ed1b429f8fa7ebd5632555e7b047bc0620075 upstream.
+
+When the endpoint used by the initial subflow is removed and re-added
+later, the PM has to force ID 0: it is a special case imposed by the
+MPTCP specs, where address ID 0 always designates the addresses used by
+the initial subflow.
+
+Note that the endpoint then needs to be re-added reusing the same ID.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -585,6 +585,11 @@ static void mptcp_pm_create_subflow_or_s
+               __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+               msk->pm.add_addr_signaled++;
++
++              /* Special case for ID0: set the correct ID */
++              if (local.addr.id == msk->mpc_endpoint_id)
++                      local.addr.id = 0;
++
+               mptcp_pm_announce_addr(msk, &local.addr, false);
+               mptcp_pm_nl_addr_send_ack(msk);
+@@ -609,6 +614,11 @@ subflow:
+               msk->pm.local_addr_used++;
+               __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
++
++              /* Special case for ID0: set the correct ID */
++              if (local.addr.id == msk->mpc_endpoint_id)
++                      local.addr.id = 0;
++
+               nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
+               if (nr == 0)
+                       continue;
diff --git a/queue-6.10/mptcp-pm-send-ack-on-an-active-subflow.patch b/queue-6.10/mptcp-pm-send-ack-on-an-active-subflow.patch
new file mode 100644 (file)
index 0000000..f438d79
--- /dev/null
@@ -0,0 +1,41 @@
+From c07cc3ed895f9bfe0c53b5ed6be710c133b4271c Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:27 +0200
+Subject: mptcp: pm: send ACK on an active subflow
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit c07cc3ed895f9bfe0c53b5ed6be710c133b4271c upstream.
+
+Taking the first subflow on the list doesn't work in some cases, e.g. if
+the initial subflow is being removed. Pick another active subflow
+instead of not sending anything.
+
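+A minimal sketch of the "first active element" selection pattern used by
+this fix (illustrative userspace C, not the kernel's list API):
+
+  #include <stdio.h>
+
+  struct subflow { int active; const char *name; };
+
+  int main(void)
+  {
+          struct subflow flows[] = {
+                  { 0, "initial (closing)" },
+                  { 1, "additional" },
+          };
+
+          /* before the fix, only flows[0] was tried: no ACK was sent */
+          for (int i = 0; i < 2; i++) {
+                  if (flows[i].active) {
+                          printf("send ACK on %s\n", flows[i].name);
+                          break;
+                  }
+          }
+          return 0;
+  }
+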
+Fixes: 84dfe3677a6f ("mptcp: send out dedicated ADD_ADDR packet")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -768,9 +768,12 @@ void mptcp_pm_nl_addr_send_ack(struct mp
+           !mptcp_pm_should_rm_signal(msk))
+               return;
+-      subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+-      if (subflow)
+-              mptcp_pm_send_ack(msk, subflow, false, false);
++      mptcp_for_each_subflow(msk, subflow) {
++              if (__mptcp_subflow_active(subflow)) {
++                      mptcp_pm_send_ack(msk, subflow, false, false);
++                      break;
++              }
++      }
+ }
+ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
diff --git a/queue-6.10/mptcp-pm-skip-connecting-to-already-established-sf.patch b/queue-6.10/mptcp-pm-skip-connecting-to-already-established-sf.patch
new file mode 100644 (file)
index 0000000..a71c189
--- /dev/null
@@ -0,0 +1,54 @@
+From bc19ff57637ff563d2bdf2b385b48c41e6509e0d Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:28 +0200
+Subject: mptcp: pm: skip connecting to already established sf
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit bc19ff57637ff563d2bdf2b385b48c41e6509e0d upstream.
+
+The lookup_subflow_by_daddr() helper checks if there is already a
+subflow connected to this address. But there could be a subflow that is
+closing, yet still taking time to do so for various reasons: latency,
+losses, data left to process, etc.
+
+If an ADD_ADDR is received while the endpoint is being closed, it is
+better to try connecting to it than to reject it: the peer that sent
+the ADD_ADDR will never be notified that it has been rejected for this
+reason, and the expected subflow would then never be created.
+
+This helper should then only look for subflows that are established, or
+going to be, but not the ones being closed.
+
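+The new check relies on the common "(1 << state) & mask" set-membership
+idiom. A standalone demo, using the real TCP state values but otherwise
+illustrative userspace code:
+
+  #include <stdio.h>
+
+  enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2,
+         TCP_SYN_RECV = 3, TCP_CLOSING = 11 };
+  #define TCPF(state) (1U << (state))
+
+  int main(void)
+  {
+          unsigned int keep = TCPF(TCP_ESTABLISHED) |
+                              TCPF(TCP_SYN_SENT) | TCPF(TCP_SYN_RECV);
+
+          printf("%d\n", !!(TCPF(TCP_CLOSING) & keep));     /* 0: skipped */
+          printf("%d\n", !!(TCPF(TCP_ESTABLISHED) & keep)); /* 1: kept */
+          return 0;
+  }
+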
+Fixes: d84ad04941c3 ("mptcp: skip connecting the connected address")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -130,12 +130,15 @@ static bool lookup_subflow_by_daddr(cons
+ {
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_addr_info cur;
+-      struct sock_common *skc;
+       list_for_each_entry(subflow, list, node) {
+-              skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
++              struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-              remote_address(skc, &cur);
++              if (!((1 << inet_sk_state_load(ssk)) &
++                    (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
++                      continue;
++
++              remote_address((struct sock_common *)ssk, &cur);
+               if (mptcp_addresses_equal(&cur, daddr, daddr->port))
+                       return true;
+       }
diff --git a/queue-6.10/mptcp-pr_debug-add-missing-n-at-the-end.patch b/queue-6.10/mptcp-pr_debug-add-missing-n-at-the-end.patch
new file mode 100644 (file)
index 0000000..4c65f68
--- /dev/null
@@ -0,0 +1,1036 @@
+From cb41b195e634d3f1ecfcd845314e64fd4bb3c7aa Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 26 Aug 2024 19:11:21 +0200
+Subject: mptcp: pr_debug: add missing \n at the end
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit cb41b195e634d3f1ecfcd845314e64fd4bb3c7aa upstream.
+
+pr_debug() calls have been added in various places in the MPTCP code to
+help developers debug some situations. With the dynamic debug feature,
+it is easy to enable all or some of them, and to ask users to reproduce
+issues with extra debug output.
+
+Many of these pr_debug() calls don't end with a newline, while no
+pr_cont() is used in the MPTCP code. So the goal was never to display
+multiple debug messages on one line: the missing '\n' was not on
+purpose.
+
+Not having the newline at the end causes these messages to be printed
+with a delay, only once something else needs to be printed. This issue
+is not visible when many messages need to be printed, but it is annoying
+and confusing when only specific messages are expected, e.g.
+
+  # echo "func mptcp_pm_add_addr_echoed +fmp" \
+        > /sys/kernel/debug/dynamic_debug/control
+  # ./mptcp_join.sh "signal address"; \
+        echo "$(awk '{print $1}' /proc/uptime) - end"; \
+        sleep 5s; \
+        echo "$(awk '{print $1}' /proc/uptime) - restart"; \
+        ./mptcp_join.sh "signal address"
+  013 signal address
+      (...)
+  10.75 - end
+  15.76 - restart
+  013 signal address
+  [  10.367935] mptcp:mptcp_pm_add_addr_echoed: MPTCP: msk=(...)
+      (...)
+
+  => a delay of 5 seconds: the message carries a 10.36 timestamp, but it
+     only shows up after 'restart', which was printed at 15.76.
+
+The 'Fixes' tag below points to the first pr_debug() used without '\n'
+in net/mptcp. This patch could be split into many small ones, each with
+a different Fixes tag, but it doesn't seem worth it, because it is easy
+to re-generate this patch with this simple 'sed' command:
+
+  git grep -l pr_debug -- net/mptcp |
+    xargs sed -i "s/\(pr_debug(\".*[^n]\)\(\"[,)]\)/\1\\\n\2/g"
+
+So in case of conflicts, simply drop the modifications, and launch this
+command.
+
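+A userspace analogy of the delayed printing, assuming stdout is attached
+to a terminal and thus line-buffered (illustrative only, the kernel log
+buffer works differently internally):
+
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          printf("queued without newline"); /* not visible yet */
+          sleep(5);
+          printf(" - flushed now\n");       /* both parts appear here */
+          return 0;
+  }
+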
+Fixes: f870fa0b5768 ("mptcp: Add MPTCP socket stubs")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-4-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/fastopen.c   |    4 +--
+ net/mptcp/options.c    |   50 +++++++++++++++++++++++------------------------
+ net/mptcp/pm.c         |   28 +++++++++++++-------------
+ net/mptcp/pm_netlink.c |   20 +++++++++---------
+ net/mptcp/protocol.c   |   52 ++++++++++++++++++++++++-------------------------
+ net/mptcp/protocol.h   |    4 +--
+ net/mptcp/sched.c      |    4 +--
+ net/mptcp/sockopt.c    |    4 +--
+ net/mptcp/subflow.c    |   48 ++++++++++++++++++++++-----------------------
+ 9 files changed, 107 insertions(+), 107 deletions(-)
+
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -68,12 +68,12 @@ void __mptcp_fastopen_gen_msk_ackseq(str
+       skb = skb_peek_tail(&sk->sk_receive_queue);
+       if (skb) {
+               WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
+-              pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,
++              pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
+                        MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
+                        MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
+               MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
+               MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
+       }
+-      pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
++      pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
+ }
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -117,7 +117,7 @@ static void mptcp_parse_option(const str
+                       mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+                       ptr += 2;
+               }
+-              pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
++              pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
+                        version, flags, opsize, mp_opt->sndr_key,
+                        mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
+               break;
+@@ -131,7 +131,7 @@ static void mptcp_parse_option(const str
+                       ptr += 4;
+                       mp_opt->nonce = get_unaligned_be32(ptr);
+                       ptr += 4;
+-                      pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
++                      pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
+                                mp_opt->backup, mp_opt->join_id,
+                                mp_opt->token, mp_opt->nonce);
+               } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+@@ -142,19 +142,19 @@ static void mptcp_parse_option(const str
+                       ptr += 8;
+                       mp_opt->nonce = get_unaligned_be32(ptr);
+                       ptr += 4;
+-                      pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
++                      pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+                                mp_opt->backup, mp_opt->join_id,
+                                mp_opt->thmac, mp_opt->nonce);
+               } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+                       mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+                       ptr += 2;
+                       memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+-                      pr_debug("MP_JOIN hmac");
++                      pr_debug("MP_JOIN hmac\n");
+               }
+               break;
+       case MPTCPOPT_DSS:
+-              pr_debug("DSS");
++              pr_debug("DSS\n");
+               ptr++;
+               /* we must clear 'mpc_map' be able to detect MP_CAPABLE
+@@ -169,7 +169,7 @@ static void mptcp_parse_option(const str
+               mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
+               mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
+-              pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
++              pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
+                        mp_opt->data_fin, mp_opt->dsn64,
+                        mp_opt->use_map, mp_opt->ack64,
+                        mp_opt->use_ack);
+@@ -207,7 +207,7 @@ static void mptcp_parse_option(const str
+                               ptr += 4;
+                       }
+-                      pr_debug("data_ack=%llu", mp_opt->data_ack);
++                      pr_debug("data_ack=%llu\n", mp_opt->data_ack);
+               }
+               if (mp_opt->use_map) {
+@@ -231,7 +231,7 @@ static void mptcp_parse_option(const str
+                               ptr += 2;
+                       }
+-                      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++                      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+                                mp_opt->data_seq, mp_opt->subflow_seq,
+                                mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
+                                mp_opt->csum);
+@@ -293,7 +293,7 @@ static void mptcp_parse_option(const str
+                       mp_opt->ahmac = get_unaligned_be64(ptr);
+                       ptr += 8;
+               }
+-              pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
++              pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
+                        (mp_opt->addr.family == AF_INET6) ? "6" : "",
+                        mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
+               break;
+@@ -309,7 +309,7 @@ static void mptcp_parse_option(const str
+               mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
+               for (i = 0; i < mp_opt->rm_list.nr; i++)
+                       mp_opt->rm_list.ids[i] = *ptr++;
+-              pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
++              pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
+               break;
+       case MPTCPOPT_MP_PRIO:
+@@ -318,7 +318,7 @@ static void mptcp_parse_option(const str
+               mp_opt->suboptions |= OPTION_MPTCP_PRIO;
+               mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
+-              pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
++              pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
+               break;
+       case MPTCPOPT_MP_FASTCLOSE:
+@@ -329,7 +329,7 @@ static void mptcp_parse_option(const str
+               mp_opt->rcvr_key = get_unaligned_be64(ptr);
+               ptr += 8;
+               mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
+-              pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
++              pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
+               break;
+       case MPTCPOPT_RST:
+@@ -343,7 +343,7 @@ static void mptcp_parse_option(const str
+               flags = *ptr++;
+               mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
+               mp_opt->reset_reason = *ptr;
+-              pr_debug("MP_RST: transient=%u reason=%u",
++              pr_debug("MP_RST: transient=%u reason=%u\n",
+                        mp_opt->reset_transient, mp_opt->reset_reason);
+               break;
+@@ -354,7 +354,7 @@ static void mptcp_parse_option(const str
+               ptr += 2;
+               mp_opt->suboptions |= OPTION_MPTCP_FAIL;
+               mp_opt->fail_seq = get_unaligned_be64(ptr);
+-              pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
++              pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
+               break;
+       default:
+@@ -417,7 +417,7 @@ bool mptcp_syn_options(struct sock *sk,
+               *size = TCPOLEN_MPTCP_MPC_SYN;
+               return true;
+       } else if (subflow->request_join) {
+-              pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
++              pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
+                        subflow->local_nonce);
+               opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+               opts->join_id = subflow->local_id;
+@@ -500,7 +500,7 @@ static bool mptcp_established_options_mp
+                       *size = TCPOLEN_MPTCP_MPC_ACK;
+               }
+-              pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
++              pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
+                        subflow, subflow->local_key, subflow->remote_key,
+                        data_len);
+@@ -509,7 +509,7 @@ static bool mptcp_established_options_mp
+               opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+               memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+               *size = TCPOLEN_MPTCP_MPJ_ACK;
+-              pr_debug("subflow=%p", subflow);
++              pr_debug("subflow=%p\n", subflow);
+               /* we can use the full delegate action helper only from BH context
+                * If we are in process context - sk is flushing the backlog at
+@@ -675,7 +675,7 @@ static bool mptcp_established_options_ad
+       *size = len;
+       if (drop_other_suboptions) {
+-              pr_debug("drop other suboptions");
++              pr_debug("drop other suboptions\n");
+               opts->suboptions = 0;
+               /* note that e.g. DSS could have written into the memory
+@@ -695,7 +695,7 @@ static bool mptcp_established_options_ad
+       } else {
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
+       }
+-      pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
++      pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
+                opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
+       return true;
+@@ -726,7 +726,7 @@ static bool mptcp_established_options_rm
+       opts->rm_list = rm_list;
+       for (i = 0; i < opts->rm_list.nr; i++)
+-              pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
++              pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
+       MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr);
+       return true;
+ }
+@@ -752,7 +752,7 @@ static bool mptcp_established_options_mp
+       opts->suboptions |= OPTION_MPTCP_PRIO;
+       opts->backup = subflow->request_bkup;
+-      pr_debug("prio=%d", opts->backup);
++      pr_debug("prio=%d\n", opts->backup);
+       return true;
+ }
+@@ -794,7 +794,7 @@ static bool mptcp_established_options_fa
+       opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
+       opts->rcvr_key = READ_ONCE(msk->remote_key);
+-      pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
++      pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
+       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
+       return true;
+ }
+@@ -816,7 +816,7 @@ static bool mptcp_established_options_mp
+       opts->suboptions |= OPTION_MPTCP_FAIL;
+       opts->fail_seq = subflow->map_seq;
+-      pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
++      pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
+       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
+       return true;
+@@ -904,7 +904,7 @@ bool mptcp_synack_options(const struct r
+               opts->csum_reqd = subflow_req->csum_reqd;
+               opts->allow_join_id0 = subflow_req->allow_join_id0;
+               *size = TCPOLEN_MPTCP_MPC_SYNACK;
+-              pr_debug("subflow_req=%p, local_key=%llu",
++              pr_debug("subflow_req=%p, local_key=%llu\n",
+                        subflow_req, subflow_req->local_key);
+               return true;
+       } else if (subflow_req->mp_join) {
+@@ -913,7 +913,7 @@ bool mptcp_synack_options(const struct r
+               opts->join_id = subflow_req->local_id;
+               opts->thmac = subflow_req->thmac;
+               opts->nonce = subflow_req->local_nonce;
+-              pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
++              pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+                        subflow_req, opts->backup, opts->join_id,
+                        opts->thmac, opts->nonce);
+               *size = TCPOLEN_MPTCP_MPJ_SYNACK;
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -19,7 +19,7 @@ int mptcp_pm_announce_addr(struct mptcp_
+ {
+       u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+-      pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
++      pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
+       lockdep_assert_held(&msk->pm.lock);
+@@ -45,7 +45,7 @@ int mptcp_pm_remove_addr(struct mptcp_so
+ {
+       u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+-      pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
++      pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
+       if (rm_addr) {
+               MPTCP_ADD_STATS(sock_net((struct sock *)msk),
+@@ -66,7 +66,7 @@ void mptcp_pm_new_connection(struct mptc
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);
++      pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);
+       WRITE_ONCE(pm->server_side, server_side);
+       mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
+@@ -90,7 +90,7 @@ bool mptcp_pm_allow_new_subflow(struct m
+       subflows_max = mptcp_pm_get_subflows_max(msk);
+-      pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
++      pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
+                subflows_max, READ_ONCE(pm->accept_subflow));
+       /* try to avoid acquiring the lock below */
+@@ -114,7 +114,7 @@ bool mptcp_pm_allow_new_subflow(struct m
+ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+                                  enum mptcp_pm_status new_status)
+ {
+-      pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
++      pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
+                BIT(new_status));
+       if (msk->pm.status & BIT(new_status))
+               return false;
+@@ -129,7 +129,7 @@ void mptcp_pm_fully_established(struct m
+       struct mptcp_pm_data *pm = &msk->pm;
+       bool announce = false;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       spin_lock_bh(&pm->lock);
+@@ -153,14 +153,14 @@ void mptcp_pm_fully_established(struct m
+ void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+ {
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+ }
+ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       if (!READ_ONCE(pm->work_pending))
+               return;
+@@ -212,7 +212,7 @@ void mptcp_pm_add_addr_received(const st
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
++      pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
+                READ_ONCE(pm->accept_addr));
+       mptcp_event_addr_announced(ssk, addr);
+@@ -243,7 +243,7 @@ void mptcp_pm_add_addr_echoed(struct mpt
+ {
+       struct mptcp_pm_data *pm = &msk->pm;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       spin_lock_bh(&pm->lock);
+@@ -267,7 +267,7 @@ void mptcp_pm_rm_addr_received(struct mp
+       struct mptcp_pm_data *pm = &msk->pm;
+       u8 i;
+-      pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
++      pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
+       for (i = 0; i < rm_list->nr; i++)
+               mptcp_event_addr_removed(msk, rm_list->ids[i]);
+@@ -299,19 +299,19 @@ void mptcp_pm_mp_fail_received(struct so
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+-      pr_debug("fail_seq=%llu", fail_seq);
++      pr_debug("fail_seq=%llu\n", fail_seq);
+       if (!READ_ONCE(msk->allow_infinite_fallback))
+               return;
+       if (!subflow->fail_tout) {
+-              pr_debug("send MP_FAIL response and infinite map");
++              pr_debug("send MP_FAIL response and infinite map\n");
+               subflow->send_mp_fail = 1;
+               subflow->send_infinite_map = 1;
+               tcp_send_ack(sk);
+       } else {
+-              pr_debug("MP_FAIL response received");
++              pr_debug("MP_FAIL response received\n");
+               WRITE_ONCE(subflow->fail_tout, 0);
+       }
+ }
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -287,7 +287,7 @@ static void mptcp_pm_add_timer(struct ti
+       struct mptcp_sock *msk = entry->sock;
+       struct sock *sk = (struct sock *)msk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       if (!msk)
+               return;
+@@ -306,7 +306,7 @@ static void mptcp_pm_add_timer(struct ti
+       spin_lock_bh(&msk->pm.lock);
+       if (!mptcp_pm_should_add_signal_addr(msk)) {
+-              pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
++              pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
+               mptcp_pm_announce_addr(msk, &entry->addr, false);
+               mptcp_pm_add_addr_send_ack(msk);
+               entry->retrans_times++;
+@@ -387,7 +387,7 @@ void mptcp_pm_free_anno_list(struct mptc
+       struct sock *sk = (struct sock *)msk;
+       LIST_HEAD(free_list);
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       spin_lock_bh(&msk->pm.lock);
+       list_splice_init(&msk->pm.anno_list, &free_list);
+@@ -473,7 +473,7 @@ static void __mptcp_pm_send_ack(struct m
+       struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+       bool slow;
+-      pr_debug("send ack for %s",
++      pr_debug("send ack for %s\n",
+                prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
+       slow = lock_sock_fast(ssk);
+@@ -708,7 +708,7 @@ static void mptcp_pm_nl_add_addr_receive
+       add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+       subflows_max = mptcp_pm_get_subflows_max(msk);
+-      pr_debug("accepted %d:%d remote family %d",
++      pr_debug("accepted %d:%d remote family %d\n",
+                msk->pm.add_addr_accepted, add_addr_accept_max,
+                msk->pm.remote.family);
+@@ -767,7 +767,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct
+ {
+       struct mptcp_subflow_context *subflow;
+-      pr_debug("bkup=%d", bkup);
++      pr_debug("bkup=%d\n", bkup);
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+@@ -803,7 +803,7 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+       struct sock *sk = (struct sock *)msk;
+       u8 i;
+-      pr_debug("%s rm_list_nr %d",
++      pr_debug("%s rm_list_nr %d\n",
+                rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
+       msk_owned_by_me(msk);
+@@ -832,7 +832,7 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+                       if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+                               continue;
+-                      pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
++                      pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
+                                rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+                                i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+                       spin_unlock_bh(&msk->pm.lock);
+@@ -889,7 +889,7 @@ void mptcp_pm_nl_work(struct mptcp_sock
+       spin_lock_bh(&msk->pm.lock);
+-      pr_debug("msk=%p status=%x", msk, pm->status);
++      pr_debug("msk=%p status=%x\n", msk, pm->status);
+       if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+               pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+               mptcp_pm_nl_add_addr_received(msk);
+@@ -1476,7 +1476,7 @@ static int mptcp_nl_remove_subflow_and_s
+       long s_slot = 0, s_num = 0;
+       struct mptcp_sock *msk;
+-      pr_debug("remove_id=%d", addr->id);
++      pr_debug("remove_id=%d\n", addr->id);
+       list.ids[list.nr++] = addr->id;
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -139,7 +139,7 @@ static bool mptcp_try_coalesce(struct so
+           !skb_try_coalesce(to, from, &fragstolen, &delta))
+               return false;
+-      pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
++      pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
+                MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+                to->len, MPTCP_SKB_CB(from)->end_seq);
+       MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+@@ -217,7 +217,7 @@ static void mptcp_data_queue_ofo(struct
+       end_seq = MPTCP_SKB_CB(skb)->end_seq;
+       max_seq = atomic64_read(&msk->rcv_wnd_sent);
+-      pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
++      pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
+                RB_EMPTY_ROOT(&msk->out_of_order_queue));
+       if (after64(end_seq, max_seq)) {
+               /* out of window */
+@@ -643,7 +643,7 @@ static bool __mptcp_move_skbs_from_subfl
+               }
+       }
+-      pr_debug("msk=%p ssk=%p", msk, ssk);
++      pr_debug("msk=%p ssk=%p\n", msk, ssk);
+       tp = tcp_sk(ssk);
+       do {
+               u32 map_remaining, offset;
+@@ -724,7 +724,7 @@ static bool __mptcp_ofo_queue(struct mpt
+       u64 end_seq;
+       p = rb_first(&msk->out_of_order_queue);
+-      pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
++      pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+       while (p) {
+               skb = rb_to_skb(p);
+               if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+@@ -746,7 +746,7 @@ static bool __mptcp_ofo_queue(struct mpt
+                       int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+                       /* skip overlapping data, if any */
+-                      pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
++                      pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
+                                MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+                                delta);
+                       MPTCP_SKB_CB(skb)->offset += delta;
+@@ -1240,7 +1240,7 @@ static int mptcp_sendmsg_frag(struct soc
+       size_t copy;
+       int i;
+-      pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
++      pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
+                msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+       if (WARN_ON_ONCE(info->sent > info->limit ||
+@@ -1341,7 +1341,7 @@ alloc_skb:
+       mpext->use_map = 1;
+       mpext->dsn64 = 1;
+-      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
++      pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
+                mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+                mpext->dsn64);
+@@ -1892,7 +1892,7 @@ static int mptcp_sendmsg(struct sock *sk
+                       if (!msk->first_pending)
+                               WRITE_ONCE(msk->first_pending, dfrag);
+               }
+-              pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
++              pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
+                        dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+                        !dfrag_collapsed);
+@@ -2248,7 +2248,7 @@ static int mptcp_recvmsg(struct sock *sk
+                       }
+               }
+-              pr_debug("block timeout %ld", timeo);
++              pr_debug("block timeout %ld\n", timeo);
+               sk_wait_data(sk, &timeo, NULL);
+       }
+@@ -2264,7 +2264,7 @@ out_err:
+               }
+       }
+-      pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
++      pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
+                msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+                skb_queue_empty(&msk->receive_queue), copied);
+       if (!(flags & MSG_PEEK))
+@@ -2717,7 +2717,7 @@ static void mptcp_mp_fail_no_response(st
+       if (!ssk)
+               return;
+-      pr_debug("MP_FAIL doesn't respond, reset the subflow");
++      pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
+       slow = lock_sock_fast(ssk);
+       mptcp_subflow_reset(ssk);
+@@ -2891,7 +2891,7 @@ void mptcp_subflow_shutdown(struct sock
+               break;
+       default:
+               if (__mptcp_check_fallback(mptcp_sk(sk))) {
+-                      pr_debug("Fallback");
++                      pr_debug("Fallback\n");
+                       ssk->sk_shutdown |= how;
+                       tcp_shutdown(ssk, how);
+@@ -2901,7 +2901,7 @@ void mptcp_subflow_shutdown(struct sock
+                       WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
+                       mptcp_schedule_work(sk);
+               } else {
+-                      pr_debug("Sending DATA_FIN on subflow %p", ssk);
++                      pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
+                       tcp_send_ack(ssk);
+                       if (!mptcp_rtx_timer_pending(sk))
+                               mptcp_reset_rtx_timer(sk);
+@@ -2967,7 +2967,7 @@ static void mptcp_check_send_data_fin(st
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
++      pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
+                msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
+                msk->snd_nxt, msk->write_seq);
+@@ -2991,7 +2991,7 @@ static void __mptcp_wr_shutdown(struct s
+ {
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
++      pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
+                msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
+                !!mptcp_send_head(sk));
+@@ -3006,7 +3006,7 @@ static void __mptcp_destroy_sock(struct
+ {
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       might_sleep();
+@@ -3114,7 +3114,7 @@ cleanup:
+               mptcp_set_state(sk, TCP_CLOSE);
+       sock_hold(sk);
+-      pr_debug("msk=%p state=%d", sk, sk->sk_state);
++      pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
+       if (msk->token)
+               mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+@@ -3546,7 +3546,7 @@ static int mptcp_get_port(struct sock *s
+ {
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      pr_debug("msk=%p, ssk=%p", msk, msk->first);
++      pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
+       if (WARN_ON_ONCE(!msk->first))
+               return -EINVAL;
+@@ -3563,7 +3563,7 @@ void mptcp_finish_connect(struct sock *s
+       sk = subflow->conn;
+       msk = mptcp_sk(sk);
+-      pr_debug("msk=%p, token=%u", sk, subflow->token);
++      pr_debug("msk=%p, token=%u\n", sk, subflow->token);
+       subflow->map_seq = subflow->iasn;
+       subflow->map_subflow_seq = 1;
+@@ -3592,7 +3592,7 @@ bool mptcp_finish_join(struct sock *ssk)
+       struct sock *parent = (void *)msk;
+       bool ret = true;
+-      pr_debug("msk=%p, subflow=%p", msk, subflow);
++      pr_debug("msk=%p, subflow=%p\n", msk, subflow);
+       /* mptcp socket already closing? */
+       if (!mptcp_is_fully_established(parent)) {
+@@ -3638,7 +3638,7 @@ err_prohibited:
+ static void mptcp_shutdown(struct sock *sk, int how)
+ {
+-      pr_debug("sk=%p, how=%d", sk, how);
++      pr_debug("sk=%p, how=%d\n", sk, how);
+       if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+               __mptcp_wr_shutdown(sk);
+@@ -3859,7 +3859,7 @@ static int mptcp_listen(struct socket *s
+       struct sock *ssk;
+       int err;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       lock_sock(sk);
+@@ -3898,7 +3898,7 @@ static int mptcp_stream_accept(struct so
+       struct mptcp_sock *msk = mptcp_sk(sock->sk);
+       struct sock *ssk, *newsk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       /* Buggy applications can call accept on socket states other then LISTEN
+        * but no need to allocate the first subflow just to error out.
+@@ -3907,12 +3907,12 @@ static int mptcp_stream_accept(struct so
+       if (!ssk)
+               return -EINVAL;
+-      pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
++      pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
+       newsk = inet_csk_accept(ssk, arg);
+       if (!newsk)
+               return arg->err;
+-      pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
++      pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
+       if (sk_is_mptcp(newsk)) {
+               struct mptcp_subflow_context *subflow;
+               struct sock *new_mptcp_sock;
+@@ -4005,7 +4005,7 @@ static __poll_t mptcp_poll(struct file *
+       sock_poll_wait(file, sock, wait);
+       state = inet_sk_state_load(sk);
+-      pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
++      pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
+       if (state == TCP_LISTEN) {
+               struct sock *ssk = READ_ONCE(msk->first);
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -1172,7 +1172,7 @@ static inline bool mptcp_check_fallback(
+ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ {
+       if (__mptcp_check_fallback(msk)) {
+-              pr_debug("TCP fallback already done (msk=%p)", msk);
++              pr_debug("TCP fallback already done (msk=%p)\n", msk);
+               return;
+       }
+       set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+@@ -1208,7 +1208,7 @@ static inline void mptcp_do_fallback(str
+       }
+ }
+-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+ static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+ {
+--- a/net/mptcp/sched.c
++++ b/net/mptcp/sched.c
+@@ -86,7 +86,7 @@ int mptcp_register_scheduler(struct mptc
+       list_add_tail_rcu(&sched->list, &mptcp_sched_list);
+       spin_unlock(&mptcp_sched_list_lock);
+-      pr_debug("%s registered", sched->name);
++      pr_debug("%s registered\n", sched->name);
+       return 0;
+ }
+@@ -118,7 +118,7 @@ int mptcp_init_sched(struct mptcp_sock *
+       if (msk->sched->init)
+               msk->sched->init(msk);
+-      pr_debug("sched=%s", msk->sched->name);
++      pr_debug("sched=%s\n", msk->sched->name);
+       return 0;
+ }
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -873,7 +873,7 @@ int mptcp_setsockopt(struct sock *sk, in
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       struct sock *ssk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       if (level == SOL_SOCKET)
+               return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+@@ -1453,7 +1453,7 @@ int mptcp_getsockopt(struct sock *sk, in
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       struct sock *ssk;
+-      pr_debug("msk=%p", msk);
++      pr_debug("msk=%p\n", msk);
+       /* @@ the meaning of setsockopt() when the socket is connected and
+        * there are multiple subflows is not yet defined. It is up to the
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -39,7 +39,7 @@ static void subflow_req_destructor(struc
+ {
+       struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+-      pr_debug("subflow_req=%p", subflow_req);
++      pr_debug("subflow_req=%p\n", subflow_req);
+       if (subflow_req->msk)
+               sock_put((struct sock *)subflow_req->msk);
+@@ -146,7 +146,7 @@ static int subflow_check_req(struct requ
+       struct mptcp_options_received mp_opt;
+       bool opt_mp_capable, opt_mp_join;
+-      pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
++      pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
+ #ifdef CONFIG_TCP_MD5SIG
+       /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -221,7 +221,7 @@ again:
+               }
+               if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
+-                      pr_debug("syn inet_sport=%d %d",
++                      pr_debug("syn inet_sport=%d %d\n",
+                                ntohs(inet_sk(sk_listener)->inet_sport),
+                                ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
+                       if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
+@@ -243,7 +243,7 @@ again:
+                       subflow_init_req_cookie_join_save(subflow_req, skb);
+               }
+-              pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
++              pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
+                        subflow_req->remote_nonce, subflow_req->msk);
+       }
+@@ -527,7 +527,7 @@ static void subflow_finish_connect(struc
+       subflow->rel_write_seq = 1;
+       subflow->conn_finished = 1;
+       subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+-      pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
++      pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
+       mptcp_get_options(skb, &mp_opt);
+       if (subflow->request_mptcp) {
+@@ -559,7 +559,7 @@ static void subflow_finish_connect(struc
+               subflow->thmac = mp_opt.thmac;
+               subflow->remote_nonce = mp_opt.nonce;
+               WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
+-              pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
++              pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
+                        subflow, subflow->thmac, subflow->remote_nonce,
+                        subflow->backup);
+@@ -585,7 +585,7 @@ static void subflow_finish_connect(struc
+                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+               if (subflow_use_different_dport(msk, sk)) {
+-                      pr_debug("synack inet_dport=%d %d",
++                      pr_debug("synack inet_dport=%d %d\n",
+                                ntohs(inet_sk(sk)->inet_dport),
+                                ntohs(inet_sk(parent)->inet_dport));
+                       MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
+@@ -655,7 +655,7 @@ static int subflow_v4_conn_request(struc
+ {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+-      pr_debug("subflow=%p", subflow);
++      pr_debug("subflow=%p\n", subflow);
+       /* Never answer to SYNs sent to broadcast or multicast */
+       if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -686,7 +686,7 @@ static int subflow_v6_conn_request(struc
+ {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+-      pr_debug("subflow=%p", subflow);
++      pr_debug("subflow=%p\n", subflow);
+       if (skb->protocol == htons(ETH_P_IP))
+               return subflow_v4_conn_request(sk, skb);
+@@ -807,7 +807,7 @@ static struct sock *subflow_syn_recv_soc
+       struct mptcp_sock *owner;
+       struct sock *child;
+-      pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
++      pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
+       /* After child creation we must look for MPC even when options
+        * are not parsed
+@@ -898,7 +898,7 @@ create_child:
+                       ctx->conn = (struct sock *)owner;
+                       if (subflow_use_different_sport(owner, sk)) {
+-                              pr_debug("ack inet_sport=%d %d",
++                              pr_debug("ack inet_sport=%d %d\n",
+                                        ntohs(inet_sk(sk)->inet_sport),
+                                        ntohs(inet_sk((struct sock *)owner)->inet_sport));
+                               if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
+@@ -961,7 +961,7 @@ enum mapping_status {
+ static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-      pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++      pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
+                ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+@@ -1121,7 +1121,7 @@ static enum mapping_status get_mapping_s
+       data_len = mpext->data_len;
+       if (data_len == 0) {
+-              pr_debug("infinite mapping received");
++              pr_debug("infinite mapping received\n");
+               MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+               subflow->map_data_len = 0;
+               return MAPPING_INVALID;
+@@ -1133,7 +1133,7 @@ static enum mapping_status get_mapping_s
+               if (data_len == 1) {
+                       bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
+                                                                mpext->dsn64);
+-                      pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
++                      pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
+                       if (subflow->map_valid) {
+                               /* A DATA_FIN might arrive in a DSS
+                                * option before the previous mapping
+@@ -1159,7 +1159,7 @@ static enum mapping_status get_mapping_s
+                       data_fin_seq &= GENMASK_ULL(31, 0);
+               mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+-              pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
++              pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+                        data_fin_seq, mpext->dsn64);
+               /* Adjust for DATA_FIN using 1 byte of sequence space */
+@@ -1205,7 +1205,7 @@ static enum mapping_status get_mapping_s
+       if (unlikely(subflow->map_csum_reqd != csum_reqd))
+               return MAPPING_INVALID;
+-      pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++      pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+                subflow->map_seq, subflow->map_subflow_seq,
+                subflow->map_data_len, subflow->map_csum_reqd,
+                subflow->map_data_csum);
+@@ -1240,7 +1240,7 @@ static void mptcp_subflow_discard_data(s
+       avail_len = skb->len - offset;
+       incr = limit >= avail_len ? avail_len + fin : limit;
+-      pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++      pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+                offset, subflow->map_subflow_seq);
+       MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+       tcp_sk(ssk)->copied_seq += incr;
+@@ -1341,7 +1341,7 @@ static bool subflow_check_data_avail(str
+               old_ack = READ_ONCE(msk->ack_seq);
+               ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+-              pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
++              pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
+                        ack_seq);
+               if (unlikely(before64(ack_seq, old_ack))) {
+                       mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+@@ -1413,7 +1413,7 @@ bool mptcp_subflow_data_available(struct
+               subflow->map_valid = 0;
+               WRITE_ONCE(subflow->data_avail, false);
+-              pr_debug("Done with mapping: seq=%u data_len=%u",
++              pr_debug("Done with mapping: seq=%u data_len=%u\n",
+                        subflow->map_subflow_seq,
+                        subflow->map_data_len);
+       }
+@@ -1523,7 +1523,7 @@ void mptcpv6_handle_mapped(struct sock *
+       target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
+-      pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
++      pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
+                subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
+       if (likely(icsk->icsk_af_ops == target))
+@@ -1616,7 +1616,7 @@ int __mptcp_subflow_connect(struct sock
+               goto failed;
+       mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+-      pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
++      pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
+                remote_token, local_id, remote_id);
+       subflow->remote_token = remote_token;
+       WRITE_ONCE(subflow->remote_id, remote_id);
+@@ -1751,7 +1751,7 @@ int mptcp_subflow_create_socket(struct s
+       SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+       subflow = mptcp_subflow_ctx(sf->sk);
+-      pr_debug("subflow=%p", subflow);
++      pr_debug("subflow=%p\n", subflow);
+       *new_sock = sf;
+       sock_hold(sk);
+@@ -1780,7 +1780,7 @@ static struct mptcp_subflow_context *sub
+       INIT_LIST_HEAD(&ctx->node);
+       INIT_LIST_HEAD(&ctx->delegated_node);
+-      pr_debug("subflow=%p", ctx);
++      pr_debug("subflow=%p\n", ctx);
+       ctx->tcp_sock = sk;
+       WRITE_ONCE(ctx->local_id, -1);
+@@ -1931,7 +1931,7 @@ static int subflow_ulp_init(struct sock
+               goto out;
+       }
+-      pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
++      pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
+       tp->is_mptcp = 1;
+       ctx->icsk_af_ops = icsk->icsk_af_ops;
diff --git a/queue-6.10/mptcp-sched-check-both-backup-in-retrans.patch b/queue-6.10/mptcp-sched-check-both-backup-in-retrans.patch
new file mode 100644 (file)
index 0000000..c17cdc2
--- /dev/null
@@ -0,0 +1,48 @@
+From 2a1f596ebb23eadc0f9b95a8012e18ef76295fc8 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 26 Aug 2024 19:11:20 +0200
+Subject: mptcp: sched: check both backup in retrans
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 2a1f596ebb23eadc0f9b95a8012e18ef76295fc8 upstream.
+
+The 'mptcp_subflow_context' structure has two items related to the
+backup flags:
+
+ - 'backup': the subflow has been marked as backup by the other peer
+
+ - 'request_bkup': the backup flag has been set by the host
+
+Looking only at the 'backup' flag can make sense in some cases, but it
+is not the behaviour of the default packet scheduler when selecting
+paths.
+
+As explained in commit b6a66e521a20 ("mptcp: sched: check both
+directions for backup"), the packet scheduler should look at both flags,
+because that was the behaviour from the beginning: the 'backup' flag was
+set by accident instead of the 'request_bkup' one. Now that the latter
+has been fixed, get_retrans() needs to be adapted as well.
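+
+For illustration, this is the condition the scheduler should apply; a
+minimal sketch where the helper name is hypothetical -- the actual fix
+below simply extends the existing test in mptcp_subflow_get_retrans():
+
+	/* A subflow is a backup path if either side flagged it:
+	 * 'backup' is set on request of the other peer, 'request_bkup'
+	 * by the local host.
+	 */
+	static bool subflow_is_backup(const struct mptcp_subflow_context *subflow)
+	{
+		return subflow->backup || subflow->request_bkup;
+	}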
+
+Fixes: b6a66e521a20 ("mptcp: sched: check both directions for backup")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-3-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2326,7 +2326,7 @@ struct sock *mptcp_subflow_get_retrans(s
+                       continue;
+               }
+-              if (subflow->backup) {
++              if (subflow->backup || subflow->request_bkup) {
+                       if (!backup)
+                               backup = ssk;
+                       continue;
diff --git a/queue-6.10/net-mana-fix-race-of-mana_hwc_post_rx_wqe-and-new-hwc-response.patch b/queue-6.10/net-mana-fix-race-of-mana_hwc_post_rx_wqe-and-new-hwc-response.patch
new file mode 100644 (file)
index 0000000..bbff422
--- /dev/null
@@ -0,0 +1,133 @@
+From 8af174ea863c72f25ce31cee3baad8a301c0cf0f Mon Sep 17 00:00:00 2001
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Wed, 21 Aug 2024 13:42:29 -0700
+Subject: net: mana: Fix race of mana_hwc_post_rx_wqe and new hwc response
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+commit 8af174ea863c72f25ce31cee3baad8a301c0cf0f upstream.
+
+The mana_hwc_rx_event_handler() / mana_hwc_handle_resp() path calls
+complete(&ctx->comp_event) before posting the wqe back. It is
+possible that other callers, like mana_create_txq(), start the
+next round of mana_hwc_send_request() before the wqe is posted.
+If the HW is fast enough to respond, it can then hit a no_wqe error
+on the HW channel and the response message is lost. The mana driver
+may then fail to create queues and open the device, because it keeps
+waiting for the HW response until it times out.
+Sample dmesg:
+[  528.610840] mana 39d4:00:02.0: HWC: Request timed out!
+[  528.614452] mana 39d4:00:02.0: Failed to send mana message: -110, 0x0
+[  528.618326] mana 39d4:00:02.0 enP14804s2: Failed to create WQ object: -110
+
+To fix it, move posting of rx wqe before complete(&ctx->comp_event).
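+
+In simplified form, the handler now does the following; a sketch only,
+with the message-id lookup, validation and response copy elided (see
+the full diff below):
+
+	static void mana_hwc_handle_resp(struct hw_channel_context *hwc,
+					 u32 resp_len,
+					 struct hwc_work_request *rx_req)
+	{
+		struct hwc_caller_ctx *ctx;
+
+		/* ... look up ctx from the msg id and copy resp_len bytes
+		 * of response into ctx->output_buf ...
+		 */
+
+		/* Repost the rx WQE first, so the HW always has a buffer
+		 * available when the woken-up caller immediately sends
+		 * the next request.
+		 */
+		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+
+		/* Only then wake up the waiting sender. */
+		complete(&ctx->comp_event);
+	}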
+
+Cc: stable@vger.kernel.org
+Fixes: ca9c54d2d6a5 ("net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)")
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Long Li <longli@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/hw_channel.c |   62 ++++++++++++-----------
+ 1 file changed, 34 insertions(+), 28 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+@@ -52,9 +52,33 @@ static int mana_hwc_verify_resp_msg(cons
+       return 0;
+ }
++static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
++                              struct hwc_work_request *req)
++{
++      struct device *dev = hwc_rxq->hwc->dev;
++      struct gdma_sge *sge;
++      int err;
++
++      sge = &req->sge;
++      sge->address = (u64)req->buf_sge_addr;
++      sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
++      sge->size = req->buf_len;
++
++      memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
++      req->wqe_req.sgl = sge;
++      req->wqe_req.num_sge = 1;
++      req->wqe_req.client_data_unit = 0;
++
++      err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
++      if (err)
++              dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
++      return err;
++}
++
+ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+-                               const struct gdma_resp_hdr *resp_msg)
++                               struct hwc_work_request *rx_req)
+ {
++      const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
+       struct hwc_caller_ctx *ctx;
+       int err;
+@@ -62,6 +86,7 @@ static void mana_hwc_handle_resp(struct
+                     hwc->inflight_msg_res.map)) {
+               dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+                       resp_msg->response.hwc_msg_id);
++              mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+               return;
+       }
+@@ -75,30 +100,13 @@ static void mana_hwc_handle_resp(struct
+       memcpy(ctx->output_buf, resp_msg, resp_len);
+ out:
+       ctx->error = err;
+-      complete(&ctx->comp_event);
+-}
+-
+-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+-                              struct hwc_work_request *req)
+-{
+-      struct device *dev = hwc_rxq->hwc->dev;
+-      struct gdma_sge *sge;
+-      int err;
+-
+-      sge = &req->sge;
+-      sge->address = (u64)req->buf_sge_addr;
+-      sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+-      sge->size = req->buf_len;
+-      memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+-      req->wqe_req.sgl = sge;
+-      req->wqe_req.num_sge = 1;
+-      req->wqe_req.client_data_unit = 0;
++      /* Must post rx wqe before complete(), otherwise the next rx may
++       * hit no_wqe error.
++       */
++      mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+-      err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+-      if (err)
+-              dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+-      return err;
++      complete(&ctx->comp_event);
+ }
+ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
+@@ -235,14 +243,12 @@ static void mana_hwc_rx_event_handler(vo
+               return;
+       }
+-      mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
++      mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
+-      /* Do no longer use 'resp', because the buffer is posted to the HW
+-       * in the below mana_hwc_post_rx_wqe().
++      /* Can no longer use 'resp', because the buffer is posted to the HW
++       * in mana_hwc_handle_resp() above.
+        */
+       resp = NULL;
+-
+-      mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
+ }
+ static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
diff --git a/queue-6.10/selftests-mptcp-join-cannot-rm-sf-if-closed.patch b/queue-6.10/selftests-mptcp-join-cannot-rm-sf-if-closed.patch
new file mode 100644 (file)
index 0000000..07c56dd
--- /dev/null
@@ -0,0 +1,80 @@
+From e93681afcb96864ec26c3b2ce94008ce93577373 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 26 Aug 2024 19:11:19 +0200
+Subject: selftests: mptcp: join: cannot rm sf if closed
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit e93681afcb96864ec26c3b2ce94008ce93577373 upstream.
+
+Thanks to the previous commit, the MPTCP subflows are now closed in
+both directions even when only the MPTCP path-manager of one peer asks
+for their closure.
+
+In the two tests modified here -- "userspace pm add & remove address"
+and "userspace pm create destroy subflow" -- one peer is controlled by
+the userspace PM, and the other one by the in-kernel PM. When the
+userspace PM sends a RM_ADDR notification, the in-kernel PM will
+automatically react by closing all subflows using this address. Now
+that the previous commit properly closes the subflows in both
+directions, the userspace PM can no longer close the same subflows
+again: they are already fully closed. Before, doing so was fine,
+because the subflows were still half-open, so sending a RM_ADDR for
+them was still OK.
+
+In other words, now that the previous commit closes the subflows, an
+error is returned to userspace if it tries to close a subflow that has
+already been closed. There is thus no need to run this command any
+more, which means the linked counters will no longer be incremented.
+
+These tests therefore no longer send a RM_ADDR and then close the
+linked subflow just after. The test with the userspace PM on the
+server side now removes one subflow linked to one address, then sends
+a RM_ADDR for another address. The test with the userspace PM on the
+client side now only removes the subflow that was previously created.
+
+Fixes: 4369c198e599 ("selftests: mptcp: test userspace pm out of transfer")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-2-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3429,14 +3429,12 @@ userspace_tests()
+                       "signal"
+               userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
+               userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
+-              userspace_pm_rm_addr $ns1 10
+               userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED
+               userspace_pm_chk_dump_addr "${ns1}" \
+-                      "id 20 flags signal 10.0.3.1" "after rm_addr 10"
++                      "id 20 flags signal 10.0.3.1" "after rm_sf 10"
+               userspace_pm_rm_addr $ns1 20
+-              userspace_pm_rm_sf $ns1 10.0.3.1 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
+               userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
+-              chk_rm_nr 2 2 invert
++              chk_rm_nr 1 1 invert
+               chk_mptcp_info subflows 0 subflows 0
+               chk_subflows_total 1 1
+               kill_events_pids
+@@ -3460,12 +3458,11 @@ userspace_tests()
+                       "id 20 flags subflow 10.0.3.2" \
+                       "subflow"
+               userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
+-              userspace_pm_rm_addr $ns2 20
+               userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
+               userspace_pm_chk_dump_addr "${ns2}" \
+                       "" \
+-                      "after rm_addr 20"
+-              chk_rm_nr 1 1
++                      "after rm_sf 20"
++              chk_rm_nr 0 1
+               chk_mptcp_info subflows 0 subflows 0
+               chk_subflows_total 1 1
+               kill_events_pids
diff --git a/queue-6.10/selftests-mptcp-join-check-re-re-adding-id-0-endp.patch b/queue-6.10/selftests-mptcp-join-check-re-re-adding-id-0-endp.patch
new file mode 100644 (file)
index 0000000..1676a20
--- /dev/null
@@ -0,0 +1,79 @@
+From d397d7246c11ca36c33c932bc36d38e3a79e9aa0 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:34 +0200
+Subject: selftests: mptcp: join: check re-re-adding ID 0 endp
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit d397d7246c11ca36c33c932bc36d38e3a79e9aa0 upstream.
+
+This test extends "delete and re-add" to validate the previous commit:
+when the endpoint linked to the initial subflow (ID 0) was re-added
+multiple times, it was no longer being used, because the internal
+linked counters are not decremented for this special endpoint: it is
+not an additional endpoint.
+
+Here, the "del/add id 0" steps are done 3 times to unsure this case is
+validated.
+
+The 'Fixes' tag below is the same as the one from the previous commit:
+this patch is not fixing anything wrong in the selftests, but it
+validates the previous fix for an issue introduced by this commit ID.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   27 +++++++++++++-----------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3576,7 +3576,7 @@ endpoint_tests()
+               pm_nl_set_limits $ns2 0 3
+               pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+-              test_linkfail=4 speed=20 \
++              test_linkfail=4 speed=5 \
+                       run_tests $ns1 $ns2 10.0.1.1 &
+               local tests_pid=$!
+@@ -3608,20 +3608,23 @@ endpoint_tests()
+               chk_subflow_nr "after no reject" 3
+               chk_mptcp_info subflows 2 subflows 2
+-              pm_nl_del_endpoint $ns2 1 10.0.1.2
+-              sleep 0.5
+-              chk_subflow_nr "after delete id 0" 2
+-              chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
+-
+-              pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+-              wait_mpj $ns2
+-              chk_subflow_nr "after re-add id 0" 3
+-              chk_mptcp_info subflows 3 subflows 3
++              local i
++              for i in $(seq 3); do
++                      pm_nl_del_endpoint $ns2 1 10.0.1.2
++                      sleep 0.5
++                      chk_subflow_nr "after delete id 0 ($i)" 2
++                      chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
++
++                      pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
++                      wait_mpj $ns2
++                      chk_subflow_nr "after re-add id 0 ($i)" 3
++                      chk_mptcp_info subflows 3 subflows 3
++              done
+               mptcp_lib_kill_wait $tests_pid
+-              chk_join_nr 4 4 4
+-              chk_rm_nr 2 2
++              chk_join_nr 6 6 6
++              chk_rm_nr 4 4
+       fi
+ }
diff --git a/queue-6.10/selftests-mptcp-join-check-removing-id-0-endpoint.patch b/queue-6.10/selftests-mptcp-join-check-removing-id-0-endpoint.patch
new file mode 100644 (file)
index 0000000..887becd
--- /dev/null
@@ -0,0 +1,91 @@
+From 5f94b08c001290acda94d9d8868075590931c198 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:26 +0200
+Subject: selftests: mptcp: join: check removing ID 0 endpoint
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 5f94b08c001290acda94d9d8868075590931c198 upstream.
+
+Removing the endpoint linked to the initial subflow should trigger a
+RM_ADDR for the right ID, and the removal of the subflow. That's what is
+now being verified in the "delete and re-add" test.
+
+Note that removing the initial subflow will not decrement the 'subflows'
+counter, which corresponds to the *additional* subflows. On the other
+hand, when the same endpoint is re-added, it will increment this
+counter, as it will be seen as an additional subflow this time.
+
+The 'Fixes' tag below is the same as the one from the previous commit:
+this patch is not fixing anything wrong in the selftests, but it
+validates the previous fix for an issue introduced by this commit ID.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   25 +++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3572,8 +3572,9 @@ endpoint_tests()
+       if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+          mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+-              pm_nl_set_limits $ns1 0 2
+-              pm_nl_set_limits $ns2 0 2
++              pm_nl_set_limits $ns1 0 3
++              pm_nl_set_limits $ns2 0 3
++              pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+               test_linkfail=4 speed=20 \
+                       run_tests $ns1 $ns2 10.0.1.1 &
+@@ -3582,17 +3583,17 @@ endpoint_tests()
+               wait_mpj $ns2
+               pm_nl_check_endpoint "creation" \
+                       $ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
+-              chk_subflow_nr "before delete" 2
++              chk_subflow_nr "before delete id 2" 2
+               chk_mptcp_info subflows 1 subflows 1
+               pm_nl_del_endpoint $ns2 2 10.0.2.2
+               sleep 0.5
+-              chk_subflow_nr "after delete" 1
++              chk_subflow_nr "after delete id 2" 1
+               chk_mptcp_info subflows 0 subflows 0
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+               wait_mpj $ns2
+-              chk_subflow_nr "after re-add" 2
++              chk_subflow_nr "after re-add id 2" 2
+               chk_mptcp_info subflows 1 subflows 1
+               pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
+@@ -3607,10 +3608,20 @@ endpoint_tests()
+               chk_subflow_nr "after no reject" 3
+               chk_mptcp_info subflows 2 subflows 2
++              pm_nl_del_endpoint $ns2 1 10.0.1.2
++              sleep 0.5
++              chk_subflow_nr "after delete id 0" 2
++              chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
++
++              pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
++              wait_mpj $ns2
++              chk_subflow_nr "after re-add id 0" 3
++              chk_mptcp_info subflows 3 subflows 3
++
+               mptcp_lib_kill_wait $tests_pid
+-              chk_join_nr 3 3 3
+-              chk_rm_nr 1 1
++              chk_join_nr 4 4 4
++              chk_rm_nr 2 2
+       fi
+ }
diff --git a/queue-6.10/selftests-mptcp-join-no-extra-msg-if-no-counter.patch b/queue-6.10/selftests-mptcp-join-no-extra-msg-if-no-counter.patch
new file mode 100644 (file)
index 0000000..582680e
--- /dev/null
@@ -0,0 +1,90 @@
+From 76a2d8394cc183df872adf04bf636eaf42746449 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:31 +0200
+Subject: selftests: mptcp: join: no extra msg if no counter
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 76a2d8394cc183df872adf04bf636eaf42746449 upstream.
+
+The checksum and fail counters might not be available. In that case,
+there is no need to display an extra message with missing info.
+
+While at it, fix the surrounding indentation, which has been wrong
+since the same commit.
+
+Fixes: 47867f0a7e83 ("selftests: mptcp: join: skip check if MIB counter not supported")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1112,26 +1112,26 @@ chk_csum_nr()
+       print_check "sum"
+       count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
+-      if [ "$count" != "$csum_ns1" ]; then
++      if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then
+               extra_msg+=" ns1=$count"
+       fi
+       if [ -z "$count" ]; then
+               print_skip
+       elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
+-         { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
++           { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
+               fail_test "got $count data checksum error[s] expected $csum_ns1"
+       else
+               print_ok
+       fi
+       print_check "csum"
+       count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
+-      if [ "$count" != "$csum_ns2" ]; then
++      if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then
+               extra_msg+=" ns2=$count"
+       fi
+       if [ -z "$count" ]; then
+               print_skip
+       elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
+-         { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
++           { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
+               fail_test "got $count data checksum error[s] expected $csum_ns2"
+       else
+               print_ok
+@@ -1169,13 +1169,13 @@ chk_fail_nr()
+       print_check "ftx"
+       count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
+-      if [ "$count" != "$fail_tx" ]; then
++      if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then
+               extra_msg+=",tx=$count"
+       fi
+       if [ -z "$count" ]; then
+               print_skip
+       elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+-         { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
++           { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
+               fail_test "got $count MP_FAIL[s] TX expected $fail_tx"
+       else
+               print_ok
+@@ -1183,13 +1183,13 @@ chk_fail_nr()
+       print_check "failrx"
+       count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
+-      if [ "$count" != "$fail_rx" ]; then
++      if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then
+               extra_msg+=",rx=$count"
+       fi
+       if [ -z "$count" ]; then
+               print_skip
+       elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+-         { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
++           { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
+               fail_test "got $count MP_FAIL[s] RX expected $fail_rx"
+       else
+               print_ok
index 498615046f7a39f931a1652788988ea7263dcea1..69670b058aa01db46b9ae716a79edca0224ad726 100644 (file)
@@ -12,3 +12,31 @@ smb-client-avoid-dereferencing-rdata-null-in-smb2_new_read_req.patch
 pinctrl-rockchip-correct-rk3328-iomux-width-flag-for-gpio2-b-pins.patch
 pinctrl-single-fix-potential-null-dereference-in-pcs_get_function.patch
 netfs-ceph-partially-revert-netfs-replace-pg_fscache-by-setting-folio-private-and-marking-dirty.patch
+wifi-wfx-repair-open-network-ap-mode.patch
+wifi-mwifiex-duplicate-static-structs-used-in-driver-instances.patch
+net-mana-fix-race-of-mana_hwc_post_rx_wqe-and-new-hwc-response.patch
+mptcp-close-subflow-when-receiving-tcp-fin.patch
+mptcp-sched-check-both-backup-in-retrans.patch
+mptcp-pr_debug-add-missing-n-at-the-end.patch
+mptcp-pm-reuse-id-0-after-delete-and-re-add.patch
+mptcp-pm-skip-connecting-to-already-established-sf.patch
+mptcp-pm-reset-mpc-endp-id-when-re-added.patch
+mptcp-pm-send-ack-on-an-active-subflow.patch
+mptcp-pm-fix-rm_addr-id-for-the-initial-subflow.patch
+mptcp-pm-do-not-remove-already-closed-subflows.patch
+mptcp-pm-fix-id-0-endp-usage-after-multiple-re-creations.patch
+mptcp-avoid-duplicated-sub_closed-events.patch
+mptcp-pm-add_addr-0-is-not-a-new-address.patch
+selftests-mptcp-join-cannot-rm-sf-if-closed.patch
+selftests-mptcp-join-check-removing-id-0-endpoint.patch
+selftests-mptcp-join-no-extra-msg-if-no-counter.patch
+selftests-mptcp-join-check-re-re-adding-id-0-endp.patch
+binfmt_elf_fdpic-fix-auxv-size-calculation-when-elf_hwcap2-is-defined.patch
+drm-v3d-disable-preemption-while-updating-gpu-stats.patch
+drm-i915-dsi-make-lenovo-yoga-tab-3-x90f-dmi-match-less-strict.patch
+drm-i915-dp_mst-fix-mst-state-after-a-sink-reset.patch
+drm-amdgpu-align-pp_power_profile_mode-with-kernel-docs.patch
+drm-amdgpu-swsmu-always-force-a-state-reprogram-on-init.patch
+drm-vmwgfx-prevent-unmapping-active-read-buffers.patch
+drm-vmwgfx-fix-prime-with-external-buffers.patch
+drm-vmwgfx-disable-coherent-dumb-buffers-without-3d.patch
diff --git a/queue-6.10/wifi-mwifiex-duplicate-static-structs-used-in-driver-instances.patch b/queue-6.10/wifi-mwifiex-duplicate-static-structs-used-in-driver-instances.patch
new file mode 100644 (file)
index 0000000..35450fd
--- /dev/null
@@ -0,0 +1,84 @@
+From 27ec3c57fcadb43c79ed05b2ea31bc18c72d798a Mon Sep 17 00:00:00 2001
+From: Sascha Hauer <s.hauer@pengutronix.de>
+Date: Fri, 9 Aug 2024 10:11:33 +0200
+Subject: wifi: mwifiex: duplicate static structs used in driver instances
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+commit 27ec3c57fcadb43c79ed05b2ea31bc18c72d798a upstream.
+
+mwifiex_band_2ghz and mwifiex_band_5ghz are statically allocated, but
+used and modified per driver instance. Duplicate them before use so
+that different driver instances do not influence each other.
+
+This was observed on a board which has one PCIe and one SDIO mwifiex
+adapter. It blew up in mwifiex_setup_ht_caps(), which was called with
+the statically allocated struct that it then modifies.
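+
+The pattern used by the fix, reduced to its core (a sketch only; error
+handling is simplified here, the real patch below frees the wiphy via
+a common err label, and the 5 GHz band is handled the same way):
+
+	/* Give each adapter its own writable copy of the static
+	 * template; devm_kmemdup() ties its lifetime to the device.
+	 */
+	wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
+						       &mwifiex_band_2ghz,
+						       sizeof(mwifiex_band_2ghz),
+						       GFP_KERNEL);
+	if (!wiphy->bands[NL80211_BAND_2GHZ])
+		return -ENOMEM;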
+
+Cc: stable@vger.kernel.org
+Fixes: d6bffe8bb520 ("mwifiex: support for creation of AP interface")
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Reviewed-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Acked-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240809-mwifiex-duplicate-static-structs-v1-1-6837b903b1a4@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c |   32 +++++++++++++++++++-----
+ 1 file changed, 26 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4363,11 +4363,27 @@ int mwifiex_register_cfg80211(struct mwi
+       if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+               wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+-      wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+-      if (adapter->config_bands & BAND_A)
+-              wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+-      else
++      wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
++                                                     &mwifiex_band_2ghz,
++                                                     sizeof(mwifiex_band_2ghz),
++                                                     GFP_KERNEL);
++      if (!wiphy->bands[NL80211_BAND_2GHZ]) {
++              ret = -ENOMEM;
++              goto err;
++      }
++
++      if (adapter->config_bands & BAND_A) {
++              wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
++                                                             &mwifiex_band_5ghz,
++                                                             sizeof(mwifiex_band_5ghz),
++                                                             GFP_KERNEL);
++              if (!wiphy->bands[NL80211_BAND_5GHZ]) {
++                      ret = -ENOMEM;
++                      goto err;
++              }
++      } else {
+               wiphy->bands[NL80211_BAND_5GHZ] = NULL;
++      }
+       if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+@@ -4461,8 +4477,7 @@ int mwifiex_register_cfg80211(struct mwi
+       if (ret < 0) {
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: wiphy_register failed: %d\n", __func__, ret);
+-              wiphy_free(wiphy);
+-              return ret;
++              goto err;
+       }
+       if (!adapter->regd) {
+@@ -4504,4 +4519,9 @@ int mwifiex_register_cfg80211(struct mwi
+       adapter->wiphy = wiphy;
+       return ret;
++
++err:
++      wiphy_free(wiphy);
++
++      return ret;
+ }
diff --git a/queue-6.10/wifi-wfx-repair-open-network-ap-mode.patch b/queue-6.10/wifi-wfx-repair-open-network-ap-mode.patch
new file mode 100644 (file)
index 0000000..f866248
--- /dev/null
@@ -0,0 +1,66 @@
+From 6d30bb88f623526197c0e18a366e68a4254a2c83 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Date: Fri, 23 Aug 2024 15:15:20 +0200
+Subject: wifi: wfx: repair open network AP mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+commit 6d30bb88f623526197c0e18a366e68a4254a2c83 upstream.
+
+A missing RSN IE in the beacon is normal for open networks.
+Avoid returning -EINVAL in this case.
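+
+cfg80211_find_ie() returns NULL when the requested element is absent,
+which for the RSN IE simply means an open network. A minimal sketch of
+the new handling (the 'ie' variable is illustrative; the driver itself
+keeps a u16 pointer to walk the IE afterwards, see the diff below):
+
+	const u8 *ie;
+
+	ie = cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+			      skb->len - ieoffset);
+	if (!ie) {
+		/* No RSN IE is fine in open networks: report success */
+		ret = 0;
+		goto free_skb;
+	}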
+
+Steps to reproduce:
+
+$ cat /etc/wpa_supplicant.conf
+network={
+       ssid="testNet"
+       mode=2
+       key_mgmt=NONE
+}
+
+$ wpa_supplicant -iwlan0 -c /etc/wpa_supplicant.conf
+nl80211: Beacon set failed: -22 (Invalid argument)
+Failed to set beacon parameters
+Interface initialization failed
+wlan0: interface state UNINITIALIZED->DISABLED
+wlan0: AP-DISABLED
+wlan0: Unable to setup interface.
+Failed to initialize AP interface
+
+After the change:
+
+$ wpa_supplicant -iwlan0 -c /etc/wpa_supplicant.conf
+Successfully initialized wpa_supplicant
+wlan0: interface state UNINITIALIZED->ENABLED
+wlan0: AP-ENABLED
+
+Cc: stable@vger.kernel.org
+Fixes: fe0a7776d4d1 ("wifi: wfx: fix possible NULL pointer dereference in wfx_set_mfp_ap()")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Reviewed-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240823131521.3309073-1-alexander.sverdlin@siemens.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/silabs/wfx/sta.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/silabs/wfx/sta.c
++++ b/drivers/net/wireless/silabs/wfx/sta.c
+@@ -352,8 +352,11 @@ static int wfx_set_mfp_ap(struct wfx_vif
+       ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+                                     skb->len - ieoffset);
+-      if (unlikely(!ptr))
++      if (!ptr) {
++              /* No RSN IE is fine in open networks */
++              ret = 0;
+               goto free_skb;
++      }
+       ptr += pairwise_cipher_suite_count_offset;
+       if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))